| text (string, 6 to 947k chars) | repo_name (string, 5 to 100 chars) | path (string, 4 to 231 chars) | language (1 class) | license (15 classes) | size (int64, 6 to 947k) | score (float64, 0 to 0.34) |
|---|---|---|---|---|---|---|
# Copyright 2015 Adam Greenstein <adamgreenstein@comcast.net>
#
# Switcharoo Cartographer is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Switcharoo Cartographer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Switcharoo Cartographer. If not, see <http://www.gnu.org/licenses/>.
from data import Access, Entry, EntryError
from Queue import Queue
class EntryQueue(Queue):
def __init__(self, transverse, maxsize=0):
self.reddit = transverse.reddit
self.events = transverse.events
Queue.__init__(self, maxsize)
def _init(self, maxsize):
Queue._init(self, maxsize)
nodes = Access(self.events).get_entry(maxsize)
for node in nodes:
try:
self.queue.append(Entry(node['raw_url'], self.reddit))
self.events.on_adding_to_queue(node['raw_url'])
except EntryError:
# TODO Remove old entry from DB
pass
def _put(self, url):
try:
entry = Entry(url, self.reddit)
if self._is_unique(entry):
self.events.on_adding_to_queue(url)
self.queue.append(entry)
else:
self.events.on_not_adding_to_queue(url)
except EntryError:
self.events.on_not_adding_to_queue(url)
def _get(self):
return self.queue.popleft()
def _is_unique(self, entry):
# TODO Logic here to determine if new url found
if entry not in self.queue:
return Access(self.events).is_unique_entry(entry)
else:
return False
| admgrn/Switcharoo | scraper/scraper/entryqueue.py | Python | gpl-3.0 | 2,041 | 0.00147 |
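For context, `EntryQueue` above only overrides the private `Queue` hooks (`_init`, `_put`, `_get`, plus `_is_unique`), so callers keep using the standard `put`/`get` interface. A minimal usage sketch, assuming a hypothetical `transverse` object that exposes `reddit` and `events` the way `__init__` expects; the import path, variable names, and URL below are illustrative, not taken from the repository:

# Hypothetical wiring: `transverse` is assumed to carry a praw-style `reddit`
# client and an `events` sink with on_adding_to_queue / on_not_adding_to_queue.
from scraper.entryqueue import EntryQueue

queue = EntryQueue(transverse, maxsize=50)  # _init pre-fills from Access(events).get_entry()
queue.put('http://www.reddit.com/r/switcharoo/comments/abc123/')  # routed through _put
entry = queue.get()  # pops the oldest unique Entry via _get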
class A:
def <weak_warning descr="Function name should be lowercase">fooBar</weak_warning>(self): pass
class B(A):
def fooBar(self): pass
| kdwink/intellij-community | python/testData/inspections/PyPep8NamingInspection/overridden.py | Python | apache-2.0 | 142 | 0.070423 |
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
import httplib
import urllib
import time
import re
from tweepy.error import TweepError
from tweepy.utils import convert_to_utf8_str
from tweepy.models import Model
re_path_template = re.compile('{\w+}')
def bind_api(**config):
class APIMethod(object):
path = config['path']
payload_type = config.get('payload_type', None)
payload_list = config.get('payload_list', False)
allowed_param = config.get('allowed_param', [])
method = config.get('method', 'GET')
require_auth = config.get('require_auth', False)
search_api = config.get('search_api', False)
use_cache = config.get('use_cache', True)
def __init__(self, api, args, kargs):
# If authentication is required and no credentials
# are provided, throw an error.
if self.require_auth and not api.auth:
raise TweepError('Authentication required!')
self.api = api
self.post_data = kargs.pop('post_data', None)
self.retry_count = kargs.pop('retry_count', api.retry_count)
self.retry_delay = kargs.pop('retry_delay', api.retry_delay)
self.retry_errors = kargs.pop('retry_errors', api.retry_errors)
self.headers = kargs.pop('headers', {})
self.build_parameters(args, kargs)
# Pick correct URL root to use
if self.search_api:
self.api_root = api.search_root
else:
self.api_root = api.api_root
# Perform any path variable substitution
self.build_path()
if api.secure:
self.scheme = 'https://'
else:
self.scheme = 'http://'
if self.search_api:
self.host = api.search_host
else:
self.host = api.host
# Manually set Host header to fix an issue in python 2.5
# or older where Host is set including the 443 port.
# This causes Twitter to issue a 301 redirect.
# See Issue https://github.com/tweepy/tweepy/issues/12
self.headers['Host'] = self.host
def build_parameters(self, args, kargs):
self.parameters = {}
for idx, arg in enumerate(args):
if arg is None:
continue
try:
self.parameters[self.allowed_param[idx]] = convert_to_utf8_str(arg)
except IndexError:
raise TweepError('Too many parameters supplied!')
for k, arg in kargs.items():
if arg is None:
continue
if k in self.parameters:
raise TweepError('Multiple values for parameter %s supplied!' % k)
self.parameters[k] = convert_to_utf8_str(arg)
def build_path(self):
for variable in re_path_template.findall(self.path):
name = variable.strip('{}')
if name == 'user' and 'user' not in self.parameters and self.api.auth:
# No 'user' parameter provided, fetch it from Auth instead.
value = self.api.auth.get_username()
else:
try:
value = urllib.quote(self.parameters[name])
except KeyError:
raise TweepError('No parameter value found for path variable: %s' % name)
del self.parameters[name]
self.path = self.path.replace(variable, value)
def execute(self):
# Build the request URL
url = self.api_root + self.path
if len(self.parameters):
url = '%s?%s' % (url, urllib.urlencode(self.parameters))
# Query the cache if one is available
# and this request uses a GET method.
if self.use_cache and self.api.cache and self.method == 'GET':
cache_result = self.api.cache.get(url)
# if cache result found and not expired, return it
if cache_result:
# must restore api reference
if isinstance(cache_result, list):
for result in cache_result:
if isinstance(result, Model):
result._api = self.api
else:
if isinstance(cache_result, Model):
cache_result._api = self.api
return cache_result
# Continue attempting request until successful
# or maximum number of retries is reached.
retries_performed = 0
while retries_performed < self.retry_count + 1:
# Open connection
# FIXME: add timeout
if self.api.secure:
conn = httplib.HTTPSConnection(self.host)
else:
conn = httplib.HTTPConnection(self.host)
# Apply authentication
if self.api.auth:
self.api.auth.apply_auth(
self.scheme + self.host + url,
self.method, self.headers, self.parameters
)
# Execute request
try:
conn.request(self.method, url, headers=self.headers, body=self.post_data)
resp = conn.getresponse()
except Exception, e:
raise TweepError('Failed to send request: %s' % e)
# Exit request loop if non-retry error code
if self.retry_errors:
if resp.status not in self.retry_errors: break
else:
if resp.status == 200: break
# Sleep before retrying request again
time.sleep(self.retry_delay)
retries_performed += 1
# If an error was returned, throw an exception
self.api.last_response = resp
if resp.status != 200:
try:
error_msg = self.api.parser.parse_error(resp.read())
except Exception:
error_msg = "Twitter error response: status code = %s" % resp.status
raise TweepError(error_msg, resp)
# Parse the response payload
result = self.api.parser.parse(self, resp.read())
conn.close()
# Store result into cache if one is available.
if self.use_cache and self.api.cache and self.method == 'GET' and result:
self.api.cache.store(url, result)
return result
def _call(api, *args, **kargs):
method = APIMethod(api, args, kargs)
return method.execute()
# Set pagination mode
if 'cursor' in APIMethod.allowed_param:
_call.pagination_mode = 'cursor'
elif 'page' in APIMethod.allowed_param:
_call.pagination_mode = 'page'
return _call
| olemoudi/tweetdigest | tweepy/tweepy/binder.py | Python | apache-2.0 | 7,174 | 0.001812 |
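`bind_api` is a factory: each call returns the `_call` closure, which tweepy's `API` class assigns as a bound method. A sketch of how an endpoint is typically declared against it; the path and parameter names are illustrative, and the surrounding `API` object (auth, host, parser, cache, retry settings) is assumed to exist rather than shown:

# Illustrative declaration; the keyword arguments are exactly the config keys
# that APIMethod reads above (path, payload_type, payload_list, allowed_param,
# require_auth).
home_timeline = bind_api(
    path='/statuses/home_timeline.json',
    payload_type='status', payload_list=True,
    allowed_param=['since_id', 'max_id', 'count', 'page'],
    require_auth=True,
)

# At call time, positional and keyword arguments are mapped onto allowed_param
# by build_parameters, path templates are filled in by build_path, and
# execute() performs the HTTP request, e.g.:
#   statuses = api.home_timeline(count=20)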
from django.conf import settings
from django.conf.urls.defaults import patterns, url
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.core.urlresolvers import NoReverseMatch, reverse, resolve, Resolver404
from django.db.models.sql.constants import QUERY_TERMS, LOOKUP_SEP
from django.http import HttpResponse
from django.utils.cache import patch_cache_control
from tastypie.authentication import Authentication
from tastypie.authorization import ReadOnlyAuthorization
from tastypie.bundle import Bundle
from tastypie.cache import NoCache
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from tastypie.exceptions import NotFound, BadRequest, InvalidFilterError, HydrationError, InvalidSortError, ImmediateHttpResponse
from tastypie.fields import *
from tastypie.http import *
from tastypie.paginator import Paginator
from tastypie.serializers import Serializer
from tastypie.throttle import BaseThrottle
from tastypie.utils import is_valid_jsonp_callback_value, dict_strip_unicode_keys, trailing_slash
from tastypie.utils.mime import determine_format, build_content_type
from tastypie.validation import Validation
try:
set
except NameError:
from sets import Set as set
# The ``copy`` module was added in Python 2.5 and ``copycompat`` was added in
# post 1.1.1 Django (r11901)
try:
from django.utils.copycompat import deepcopy
from django.views.decorators.csrf import csrf_exempt
except ImportError:
from copy import deepcopy
def csrf_exempt(func):
return func
class ResourceOptions(object):
"""
A configuration class for ``Resource``.
Provides sane defaults and the logic needed to augment these settings with
the internal ``class Meta`` used on ``Resource`` subclasses.
"""
serializer = Serializer()
authentication = Authentication()
authorization = ReadOnlyAuthorization()
cache = NoCache()
throttle = BaseThrottle()
validation = Validation()
allowed_methods = ['get', 'post', 'put', 'delete']
list_allowed_methods = None
detail_allowed_methods = None
limit = getattr(settings, 'API_LIMIT_PER_PAGE', 20)
api_name = None
resource_name = None
urlconf_namespace = None
default_format = 'application/json'
filtering = {}
ordering = []
object_class = None
queryset = None
fields = []
excludes = []
include_resource_uri = True
include_absolute_url = False
def __new__(cls, meta=None):
overrides = {}
# Handle overrides.
if meta:
for override_name in dir(meta):
# No internals please.
if not override_name.startswith('_'):
overrides[override_name] = getattr(meta, override_name)
allowed_methods = overrides.get('allowed_methods', ['get', 'post', 'put', 'delete'])
if overrides.get('list_allowed_methods', None) is None:
overrides['list_allowed_methods'] = allowed_methods
if overrides.get('detail_allowed_methods', None) is None:
overrides['detail_allowed_methods'] = allowed_methods
if not overrides.get('queryset', None) is None:
overrides['object_class'] = overrides['queryset'].model
return object.__new__(type('ResourceOptions', (cls,), overrides))
class DeclarativeMetaclass(type):
def __new__(cls, name, bases, attrs):
attrs['base_fields'] = {}
declared_fields = {}
# Inherit any fields from parent(s).
try:
parents = [b for b in bases if issubclass(b, Resource)]
for p in parents:
fields = getattr(p, 'base_fields', {})
for field_name, field_object in fields.items():
attrs['base_fields'][field_name] = deepcopy(field_object)
except NameError:
pass
for field_name, obj in attrs.items():
if isinstance(obj, ApiField):
field = attrs.pop(field_name)
declared_fields[field_name] = field
attrs['base_fields'].update(declared_fields)
attrs['declared_fields'] = declared_fields
new_class = super(DeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
opts = getattr(new_class, 'Meta', None)
new_class._meta = ResourceOptions(opts)
if not getattr(new_class._meta, 'resource_name', None):
# No ``resource_name`` provided. Attempt to auto-name the resource.
class_name = new_class.__name__
name_bits = [bit for bit in class_name.split('Resource') if bit]
resource_name = ''.join(name_bits).lower()
new_class._meta.resource_name = resource_name
if getattr(new_class._meta, 'include_resource_uri', True):
if not 'resource_uri' in new_class.base_fields:
new_class.base_fields['resource_uri'] = CharField(readonly=True)
elif 'resource_uri' in new_class.base_fields and not 'resource_uri' in attrs:
del(new_class.base_fields['resource_uri'])
for field_name, field_object in new_class.base_fields.items():
if hasattr(field_object, 'contribute_to_class'):
field_object.contribute_to_class(new_class, field_name)
return new_class
class Resource(object):
"""
Handles the data, request dispatch and responding to requests.
Serialization/deserialization is handled "at the edges" (i.e. at the
beginning/end of the request/response cycle) so that everything internally
is Python data structures.
This class tries to be non-model specific, so it can be hooked up to other
data sources, such as search results, files, other data, etc.
"""
__metaclass__ = DeclarativeMetaclass
def __init__(self, api_name=None):
self.fields = deepcopy(self.base_fields)
if not api_name is None:
self._meta.api_name = api_name
def __getattr__(self, name):
if name in self.fields:
return self.fields[name]
def wrap_view(self, view):
"""
Wraps methods so they can be called in a more functional way as well
as handling exceptions better.
Note that if ``BadRequest`` or an exception with a ``response`` attr
are seen, there is special handling to either present a message back
to the user or return the response traveling with the exception.
"""
@csrf_exempt
def wrapper(request, *args, **kwargs):
try:
callback = getattr(self, view)
response = callback(request, *args, **kwargs)
if request.is_ajax():
# IE excessively caches XMLHttpRequests, so we're disabling
# the browser cache here.
# See http://www.enhanceie.com/ie/bugs.asp for details.
patch_cache_control(response, no_cache=True)
return response
except (BadRequest, ApiFieldError), e:
return HttpBadRequest(e.args[0])
except Exception, e:
if hasattr(e, 'response'):
return e.response
# A real, non-expected exception.
# Handle the case where the full traceback is more helpful
# than the serialized error.
if settings.DEBUG and getattr(settings, 'TASTYPIE_FULL_DEBUG', False):
raise
# Rather than re-raising, we're going to do things similar to
# what Django does. The difference is returning a serialized
# error message.
return self._handle_500(request, e)
return wrapper
def _handle_500(self, request, exception):
import traceback
import sys
the_trace = '\n'.join(traceback.format_exception(*(sys.exc_info())))
if settings.DEBUG:
data = {
"error_message": exception.message,
"traceback": the_trace,
}
desired_format = self.determine_format(request)
serialized = self.serialize(request, data, desired_format)
return HttpApplicationError(content=serialized, content_type=build_content_type(desired_format))
# When DEBUG is False, send an error message to the admins.
from django.core.mail import mail_admins
subject = 'Error (%s IP): %s' % ((request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS and 'internal' or 'EXTERNAL'), request.path)
try:
request_repr = repr(request)
except:
request_repr = "Request repr() unavailable"
message = "%s\n\n%s" % (the_trace, request_repr)
mail_admins(subject, message, fail_silently=True)
# Prep the data going out.
data = {
"error_message": getattr(settings, 'TASTYPIE_CANNED_ERROR', "Sorry, this request could not be processed. Please try again later."),
}
desired_format = self.determine_format(request)
serialized = self.serialize(request, data, desired_format)
return HttpApplicationError(content=serialized, content_type=build_content_type(desired_format))
def _build_reverse_url(self, name, args=None, kwargs=None):
"""
A convenience hook for overriding how URLs are built.
See ``NamespacedModelResource._build_reverse_url`` for an example.
"""
return reverse(name, args=args, kwargs=kwargs)
def base_urls(self):
"""
The standard URLs this ``Resource`` should respond to.
"""
# Due to the way Django parses URLs, ``get_multiple`` won't work without
# a trailing slash.
return [
url(r"^(?P<resource_name>%s)%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_list'), name="api_dispatch_list"),
url(r"^(?P<resource_name>%s)/schema%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_schema'), name="api_get_schema"),
url(r"^(?P<resource_name>%s)/set/(?P<pk_list>\w[\w/;-]*)/$" % self._meta.resource_name, self.wrap_view('get_multiple'), name="api_get_multiple"),
url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
]
def override_urls(self):
"""
A hook for adding your own URLs or overriding the default URLs.
"""
return []
@property
def urls(self):
"""
The endpoints this ``Resource`` responds to.
Mostly a standard URLconf, this is suitable for either automatic use
when registered with an ``Api`` class or for including directly in
a URLconf should you choose to.
"""
urls = self.override_urls() + self.base_urls()
urlpatterns = patterns('',
*urls
)
return urlpatterns
def determine_format(self, request):
"""
Used to determine the desired format.
Largely relies on ``tastypie.utils.mime.determine_format`` but here
as a point of extension.
"""
return determine_format(request, self._meta.serializer, default_format=self._meta.default_format)
def serialize(self, request, data, format, options=None):
"""
Given a request, data and a desired format, produces a serialized
version suitable for transfer over the wire.
Mostly a hook, this uses the ``Serializer`` from ``Resource._meta``.
"""
options = options or {}
if 'text/javascript' in format:
# get JSONP callback name. default to "callback"
callback = request.GET.get('callback', 'callback')
if not is_valid_jsonp_callback_value(callback):
raise BadRequest('JSONP callback name is invalid.')
options['callback'] = callback
return self._meta.serializer.serialize(data, format, options)
def deserialize(self, request, data, format='application/json'):
"""
Given a request, data and a format, deserializes the given data.
It relies on the request properly sending a ``CONTENT_TYPE`` header,
falling back to ``application/json`` if not provided.
Mostly a hook, this uses the ``Serializer`` from ``Resource._meta``.
"""
return self._meta.serializer.deserialize(data, format=request.META.get('CONTENT_TYPE', 'application/json'))
def dispatch_list(self, request, **kwargs):
"""
A view for handling the various HTTP methods (GET/POST/PUT/DELETE) over
the entire list of resources.
Relies on ``Resource.dispatch`` for the heavy-lifting.
"""
return self.dispatch('list', request, **kwargs)
def dispatch_detail(self, request, **kwargs):
"""
A view for handling the various HTTP methods (GET/POST/PUT/DELETE) on
a single resource.
Relies on ``Resource.dispatch`` for the heavy-lifting.
"""
return self.dispatch('detail', request, **kwargs)
def dispatch(self, request_type, request, **kwargs):
"""
Handles the common operations (allowed HTTP method, authentication,
throttling, method lookup) surrounding most CRUD interactions.
"""
allowed_methods = getattr(self._meta, "%s_allowed_methods" % request_type, None)
request_method = self.method_check(request, allowed=allowed_methods)
method = getattr(self, "%s_%s" % (request_method, request_type), None)
if method is None:
raise ImmediateHttpResponse(response=HttpNotImplemented())
self.is_authenticated(request)
self.is_authorized(request)
self.throttle_check(request)
# All clear. Process the request.
request = convert_post_to_put(request)
response = method(request, **kwargs)
# Add the throttled request.
self.log_throttled_access(request)
# If what comes back isn't a ``HttpResponse``, assume that the
# request was accepted and that some action occurred. This also
# prevents Django from freaking out.
if not isinstance(response, HttpResponse):
return HttpAccepted()
return response
def remove_api_resource_names(self, url_dict):
"""
Given a dictionary of regex matches from a URLconf, removes
``api_name`` and/or ``resource_name`` if found.
This is useful for converting URLconf matches into something suitable
for data lookup. For example::
Model.objects.filter(**self.remove_api_resource_names(matches))
"""
kwargs_subset = url_dict.copy()
for key in ['api_name', 'resource_name']:
try:
del(kwargs_subset[key])
except KeyError:
pass
return kwargs_subset
def method_check(self, request, allowed=None):
"""
Ensures that the HTTP method used on the request is allowed to be
handled by the resource.
Takes an ``allowed`` parameter, which should be a list of lowercase
HTTP methods to check against. Usually, this looks like::
# The most generic lookup.
self.method_check(request, self._meta.allowed_methods)
# A lookup against what's allowed for list-type methods.
self.method_check(request, self._meta.list_allowed_methods)
# A useful check when creating a new endpoint that only handles
# GET.
self.method_check(request, ['get'])
"""
if allowed is None:
allowed = []
request_method = request.method.lower()
if not request_method in allowed:
raise ImmediateHttpResponse(response=HttpMethodNotAllowed())
return request_method
def is_authorized(self, request, object=None):
"""
Handles checking of permissions to see if the user has authorization
to GET, POST, PUT, or DELETE this resource. If ``object`` is provided,
the authorization backend can apply additional row-level permissions
checking.
"""
auth_result = self._meta.authorization.is_authorized(request, object)
if isinstance(auth_result, HttpResponse):
raise ImmediateHttpResponse(response=auth_result)
if not auth_result is True:
raise ImmediateHttpResponse(response=HttpUnauthorized())
def is_authenticated(self, request):
"""
Handles checking if the user is authenticated and dealing with
unauthenticated users.
Mostly a hook, this uses class assigned to ``authentication`` from
``Resource._meta``.
"""
# Authenticate the request as needed.
auth_result = self._meta.authentication.is_authenticated(request)
if isinstance(auth_result, HttpResponse):
raise ImmediateHttpResponse(response=auth_result)
if not auth_result is True:
raise ImmediateHttpResponse(response=HttpUnauthorized())
def throttle_check(self, request):
"""
Handles checking if the user should be throttled.
Mostly a hook, this uses class assigned to ``throttle`` from
``Resource._meta``.
"""
identifier = self._meta.authentication.get_identifier(request)
# Check to see if they should be throttled.
if self._meta.throttle.should_be_throttled(identifier):
# Throttle limit exceeded.
raise ImmediateHttpResponse(response=HttpForbidden())
def log_throttled_access(self, request):
"""
Handles the recording of the user's access for throttling purposes.
Mostly a hook, this uses class assigned to ``throttle`` from
``Resource._meta``.
"""
request_method = request.method.lower()
self._meta.throttle.accessed(self._meta.authentication.get_identifier(request), url=request.get_full_path(), request_method=request_method)
def build_bundle(self, obj=None, data=None):
"""
Given either an object, a data dictionary or both, builds a ``Bundle``
for use throughout the ``dehydrate/hydrate`` cycle.
If no object is provided, an empty object from
``Resource._meta.object_class`` is created so that attempts to access
``bundle.obj`` do not fail.
"""
if obj is None:
obj = self._meta.object_class()
return Bundle(obj, data)
def build_filters(self, filters=None):
"""
Allows for the filtering of applicable objects.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
return filters
def apply_sorting(self, obj_list, options=None):
"""
Allows for the sorting of objects being returned.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
return obj_list
# URL-related methods.
def get_resource_uri(self, bundle_or_obj):
"""
This needs to be implemented at the user level.
A ``return reverse("api_dispatch_detail", kwargs={'resource_name':
self.resource_name, 'pk': object.id})`` should be all that would
be needed.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def get_resource_list_uri(self):
"""
Returns a URL specific to this resource's list endpoint.
"""
kwargs = {
'resource_name': self._meta.resource_name,
}
if self._meta.api_name is not None:
kwargs['api_name'] = self._meta.api_name
try:
return self._build_reverse_url("api_dispatch_list", kwargs=kwargs)
except NoReverseMatch:
return None
def get_via_uri(self, uri):
"""
This pulls apart the salient bits of the URI and populates the
resource via a ``obj_get``.
If you need custom behavior based on other portions of the URI,
simply override this method.
"""
try:
view, args, kwargs = resolve(uri)
except Resolver404:
raise NotFound("The URL provided '%s' was not a link to a valid resource." % uri)
return self.obj_get(**self.remove_api_resource_names(kwargs))
# Data preparation.
def full_dehydrate(self, obj):
"""
Given an object instance, extract the information from it to populate
the resource.
"""
bundle = Bundle(obj=obj)
# Dehydrate each field.
for field_name, field_object in self.fields.items():
# A touch leaky but it makes URI resolution work.
if isinstance(field_object, RelatedField):
field_object.api_name = self._meta.api_name
field_object.resource_name = self._meta.resource_name
bundle.data[field_name] = field_object.dehydrate(bundle)
# Check for an optional method to do further dehydration.
method = getattr(self, "dehydrate_%s" % field_name, None)
if method:
bundle.data[field_name] = method(bundle)
bundle = self.dehydrate(bundle)
return bundle
def dehydrate(self, bundle):
"""
A hook to allow a final manipulation of data once all fields/methods
have built out the dehydrated data.
Useful if you need to access more than one dehydrated field or want
to annotate on additional data.
Must return the modified bundle.
"""
return bundle
def full_hydrate(self, bundle):
"""
Given a populated bundle, distill it and turn it back into
a full-fledged object instance.
"""
if bundle.obj is None:
bundle.obj = self._meta.object_class()
for field_name, field_object in self.fields.items():
if field_object.attribute:
value = field_object.hydrate(bundle)
if value is not None:
# We need to avoid populating M2M data here as that will
# cause things to blow up.
if not getattr(field_object, 'is_related', False):
setattr(bundle.obj, field_object.attribute, value)
elif not getattr(field_object, 'is_m2m', False):
setattr(bundle.obj, field_object.attribute, value.obj)
# Check for an optional method to do further hydration.
method = getattr(self, "hydrate_%s" % field_name, None)
if method:
bundle = method(bundle)
bundle = self.hydrate(bundle)
return bundle
def hydrate(self, bundle):
"""
A hook to allow a final manipulation of data once all fields/methods
have built out the hydrated data.
Useful if you need to access more than one hydrated field or want
to annotate on additional data.
Must return the modified bundle.
"""
return bundle
def hydrate_m2m(self, bundle):
"""
Populate the ManyToMany data on the instance.
"""
if bundle.obj is None:
raise HydrationError("You must call 'full_hydrate' before attempting to run 'hydrate_m2m' on %r." % self)
for field_name, field_object in self.fields.items():
if not getattr(field_object, 'is_m2m', False):
continue
if field_object.attribute:
# Note that we only hydrate the data, leaving the instance
# unmodified. It's up to the user's code to handle this.
# The ``ModelResource`` provides a working baseline
# in this regard.
bundle.data[field_name] = field_object.hydrate_m2m(bundle)
for field_name, field_object in self.fields.items():
if not getattr(field_object, 'is_m2m', False):
continue
method = getattr(self, "hydrate_%s" % field_name, None)
if method:
method(bundle)
return bundle
def build_schema(self):
"""
Returns a dictionary of all the fields on the resource and some
properties about those fields.
Used by the ``schema/`` endpoint to describe what will be available.
"""
data = {
'fields': {},
'default_format': self._meta.default_format,
}
if self._meta.ordering:
data['ordering'] = self._meta.ordering
if self._meta.filtering:
data['filtering'] = self._meta.filtering
for field_name, field_object in self.fields.items():
data['fields'][field_name] = {
'type': field_object.dehydrated_type,
'nullable': field_object.null,
'readonly': field_object.readonly,
'help_text': field_object.help_text,
}
return data
def dehydrate_resource_uri(self, bundle):
"""
For the automatically included ``resource_uri`` field, dehydrate
the URI for the given bundle.
Returns empty string if no URI can be generated.
"""
try:
return self.get_resource_uri(bundle)
except NotImplementedError:
return ''
except NoReverseMatch:
return ''
def generate_cache_key(self, *args, **kwargs):
"""
Creates a unique-enough cache key.
This is based off the current api_name/resource_name/args/kwargs.
"""
smooshed = []
for key, value in kwargs.items():
smooshed.append("%s=%s" % (key, value))
# Use a list plus a ``.join()`` because it's faster than concatenation.
return "%s:%s:%s:%s" % (self._meta.api_name, self._meta.resource_name, ':'.join(args), ':'.join(smooshed))
# Data access methods.
def get_object_list(self, request):
"""
A hook to allow returning the list of available objects.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def apply_authorization_limits(self, request, object_list):
"""
Allows the ``Authorization`` class to further limit the object list.
Also a hook to customize per ``Resource``.
"""
if hasattr(self._meta.authorization, 'apply_limits'):
object_list = self._meta.authorization.apply_limits(request, object_list)
return object_list
def obj_get_list(self, request=None, **kwargs):
"""
Fetches the list of objects available on the resource.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def cached_obj_get_list(self, request=None, **kwargs):
"""
A version of ``obj_get_list`` that uses the cache as a means to get
commonly-accessed data faster.
"""
cache_key = self.generate_cache_key('list', **kwargs)
obj_list = self._meta.cache.get(cache_key)
if obj_list is None:
obj_list = self.obj_get_list(request=request, **kwargs)
self._meta.cache.set(cache_key, obj_list)
return obj_list
def obj_get(self, request=None, **kwargs):
"""
Fetches an individual object on the resource.
This needs to be implemented at the user level. If the object can not
be found, this should raise a ``NotFound`` exception.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def cached_obj_get(self, request=None, **kwargs):
"""
A version of ``obj_get`` that uses the cache as a means to get
commonly-accessed data faster.
"""
cache_key = self.generate_cache_key('detail', **kwargs)
bundle = self._meta.cache.get(cache_key)
if bundle is None:
bundle = self.obj_get(request=request, **kwargs)
self._meta.cache.set(cache_key, bundle)
return bundle
def obj_create(self, bundle, request=None, **kwargs):
"""
Creates a new object based on the provided data.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_update(self, bundle, request=None, **kwargs):
"""
Updates an existing object (or creates a new object) based on the
provided data.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_delete_list(self, request=None, **kwargs):
"""
Deletes an entire list of objects.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_delete(self, request=None, **kwargs):
"""
Deletes a single object.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def create_response(self, request, data):
"""
Extracts the common "which-format/serialize/return-response" cycle.
Mostly a useful shortcut/hook.
"""
desired_format = self.determine_format(request)
serialized = self.serialize(request, data, desired_format)
return HttpResponse(content=serialized, content_type=build_content_type(desired_format))
def is_valid(self, bundle, request=None):
"""
Handles checking if the data provided by the user is valid.
Mostly a hook, this uses class assigned to ``validation`` from
``Resource._meta``.
If validation fails, an error is raised with the error messages
serialized inside it.
"""
errors = self._meta.validation.is_valid(bundle, request)
if len(errors):
if request:
desired_format = self.determine_format(request)
else:
desired_format = self._meta.default_format
serialized = self.serialize(request, errors, desired_format)
response = HttpBadRequest(content=serialized, content_type=build_content_type(desired_format))
raise ImmediateHttpResponse(response=response)
def rollback(self, bundles):
"""
Given the list of bundles, delete all objects pertaining to those
bundles.
This needs to be implemented at the user level. No exceptions should
be raised if possible.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
# Views.
def get_list(self, request, **kwargs):
"""
Returns a serialized list of resources.
Calls ``obj_get_list`` to provide the data, then handles that result
set and serializes it.
Should return a HttpResponse (200 OK).
"""
# TODO: Uncached for now. Invalidation that works for everyone may be
# impossible.
objects = self.obj_get_list(request=request, **self.remove_api_resource_names(kwargs))
sorted_objects = self.apply_sorting(objects, options=request.GET)
paginator = Paginator(request.GET, sorted_objects, resource_uri=self.get_resource_list_uri(),
limit=self._meta.limit)
to_be_serialized = paginator.page()
# Dehydrate the bundles in preparation for serialization.
to_be_serialized['objects'] = [self.full_dehydrate(obj=obj) for obj in to_be_serialized['objects']]
return self.create_response(request, to_be_serialized)
def get_detail(self, request, **kwargs):
"""
Returns a single serialized resource.
Calls ``cached_obj_get/obj_get`` to provide the data, then handles that result
set and serializes it.
Should return a HttpResponse (200 OK).
"""
try:
obj = self.cached_obj_get(request=request, **self.remove_api_resource_names(kwargs))
except ObjectDoesNotExist:
return HttpGone()
except MultipleObjectsReturned:
return HttpMultipleChoices("More than one resource is found at this URI.")
bundle = self.full_dehydrate(obj)
return self.create_response(request, bundle)
def put_list(self, request, **kwargs):
"""
Replaces a collection of resources with another collection.
Calls ``delete_list`` to clear out the collection then ``obj_create``
with the provided the data to create the new collection.
Return ``HttpAccepted`` (204 No Content).
"""
deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
if not 'objects' in deserialized:
raise BadRequest("Invalid data sent.")
self.obj_delete_list(request=request, **self.remove_api_resource_names(kwargs))
bundles_seen = []
for object_data in deserialized['objects']:
bundle = self.build_bundle(data=dict_strip_unicode_keys(object_data))
# Attempt to be transactional, deleting any previously created
# objects if validation fails.
try:
self.is_valid(bundle, request)
except ImmediateHttpResponse:
self.rollback(bundles_seen)
raise
self.obj_create(bundle, request=request)
bundles_seen.append(bundle)
return HttpAccepted()
def put_detail(self, request, **kwargs):
"""
Either updates an existing resource or creates a new one with the
provided data.
Calls ``obj_update`` with the provided data first, but falls back to
``obj_create`` if the object does not already exist.
If a new resource is created, return ``HttpCreated`` (201 Created).
If an existing resource is modified, return ``HttpAccepted`` (204 No Content).
"""
deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized))
self.is_valid(bundle, request)
try:
updated_bundle = self.obj_update(bundle, request=request, pk=kwargs.get('pk'))
return HttpAccepted()
except:
updated_bundle = self.obj_create(bundle, request=request, pk=kwargs.get('pk'))
return HttpCreated(location=self.get_resource_uri(updated_bundle))
def post_list(self, request, **kwargs):
"""
Creates a new resource/object with the provided data.
Calls ``obj_create`` with the provided data and returns a response
with the new resource's location.
If a new resource is created, return ``HttpCreated`` (201 Created).
"""
deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized))
self.is_valid(bundle, request)
updated_bundle = self.obj_create(bundle, request=request)
return HttpCreated(location=self.get_resource_uri(updated_bundle))
def post_detail(self, request, **kwargs):
"""
Creates a new subcollection of the resource under a resource.
This is not implemented by default because most people's data models
aren't self-referential.
If a new resource is created, return ``HttpCreated`` (201 Created).
"""
return HttpNotImplemented()
def delete_list(self, request, **kwargs):
"""
Destroys a collection of resources/objects.
Calls ``obj_delete_list``.
If the resources are deleted, return ``HttpAccepted`` (204 No Content).
"""
self.obj_delete_list(request=request, **self.remove_api_resource_names(kwargs))
return HttpAccepted()
def delete_detail(self, request, **kwargs):
"""
Destroys a single resource/object.
Calls ``obj_delete``.
If the resource is deleted, return ``HttpAccepted`` (204 No Content).
If the resource did not exist, return ``HttpGone`` (410 Gone).
"""
try:
self.obj_delete(request=request, **self.remove_api_resource_names(kwargs))
return HttpAccepted()
except NotFound:
return HttpGone()
def get_schema(self, request, **kwargs):
"""
Returns a serialized form of the schema of the resource.
Calls ``build_schema`` to generate the data. This method only responds
to HTTP GET.
Should return a HttpResponse (200 OK).
"""
self.method_check(request, allowed=['get'])
self.is_authenticated(request)
self.throttle_check(request)
self.log_throttled_access(request)
return self.create_response(request, self.build_schema())
def get_multiple(self, request, **kwargs):
"""
Returns a serialized list of resources based on the identifiers
from the URL.
Calls ``obj_get`` to fetch only the objects requested. This method
only responds to HTTP GET.
Should return a HttpResponse (200 OK).
"""
self.method_check(request, allowed=['get'])
self.is_authenticated(request)
self.throttle_check(request)
# Rip apart the list then iterate.
obj_pks = kwargs.get('pk_list', '').split(';')
objects = []
not_found = []
for pk in obj_pks:
try:
obj = self.obj_get(request, pk=pk)
bundle = self.full_dehydrate(obj)
objects.append(bundle)
except ObjectDoesNotExist:
not_found.append(pk)
object_list = {
'objects': objects,
}
if len(not_found):
object_list['not_found'] = not_found
self.log_throttled_access(request)
return self.create_response(request, object_list)
class ModelDeclarativeMetaclass(DeclarativeMetaclass):
def __new__(cls, name, bases, attrs):
new_class = super(ModelDeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
fields = getattr(new_class._meta, 'fields', [])
excludes = getattr(new_class._meta, 'excludes', [])
field_names = new_class.base_fields.keys()
for field_name in field_names:
if field_name == 'resource_uri':
continue
if field_name in new_class.declared_fields:
continue
if len(fields) and not field_name in fields:
del(new_class.base_fields[field_name])
if len(excludes) and field_name in excludes:
del(new_class.base_fields[field_name])
# Add in the new fields.
new_class.base_fields.update(new_class.get_fields(fields, excludes))
if getattr(new_class._meta, 'include_absolute_url', True):
if not 'absolute_url' in new_class.base_fields:
new_class.base_fields['absolute_url'] = CharField(attribute='get_absolute_url', readonly=True)
elif 'absolute_url' in new_class.base_fields and not 'absolute_url' in attrs:
del(new_class.base_fields['absolute_url'])
return new_class
class ModelResource(Resource):
"""
A subclass of ``Resource`` designed to work with Django's ``Models``.
This class will introspect a given ``Model`` and build a field list based
on the fields found on the model (excluding relational fields).
Given that it is aware of Django's ORM, it also handles the CRUD data
operations of the resource.
"""
__metaclass__ = ModelDeclarativeMetaclass
@classmethod
def should_skip_field(cls, field):
"""
Given a Django model field, return if it should be included in the
contributed ApiFields.
"""
# Ignore certain fields (related fields).
if getattr(field, 'rel'):
return True
return False
@classmethod
def api_field_from_django_field(cls, f, default=CharField):
"""
Returns the field type that would likely be associated with each
Django type.
"""
result = default
if f.get_internal_type() in ('DateField', 'DateTimeField'):
result = DateTimeField
elif f.get_internal_type() in ('BooleanField', 'NullBooleanField'):
result = BooleanField
elif f.get_internal_type() in ('DecimalField', 'FloatField'):
result = FloatField
elif f.get_internal_type() in ('IntegerField', 'PositiveIntegerField', 'PositiveSmallIntegerField', 'SmallIntegerField'):
result = IntegerField
elif f.get_internal_type() in ('FileField', 'ImageField'):
result = FileField
# TODO: Perhaps enable these via introspection. The reason they're not enabled
# by default is the very different ``__init__`` they have over
# the other fields.
# elif f.get_internal_type() == 'ForeignKey':
# result = ForeignKey
# elif f.get_internal_type() == 'ManyToManyField':
# result = ManyToManyField
return result
@classmethod
def get_fields(cls, fields=None, excludes=None):
"""
Given any explicit fields to include and fields to exclude, add
additional fields based on the associated model.
"""
final_fields = {}
fields = fields or []
excludes = excludes or []
if not cls._meta.object_class:
return final_fields
for f in cls._meta.object_class._meta.fields:
# If the field name is already present, skip
if f.name in cls.base_fields:
continue
# If field is not present in explicit field listing, skip
if fields and f.name not in fields:
continue
# If field is in exclude list, skip
if excludes and f.name in excludes:
continue
if cls.should_skip_field(f):
continue
api_field_class = cls.api_field_from_django_field(f)
kwargs = {
'attribute': f.name,
}
if f.null is True:
kwargs['null'] = True
kwargs['unique'] = f.unique
if not f.null and f.blank is True:
kwargs['default'] = ''
if f.get_internal_type() == 'TextField':
kwargs['default'] = ''
if f.has_default():
kwargs['default'] = f.default
final_fields[f.name] = api_field_class(**kwargs)
final_fields[f.name].instance_name = f.name
return final_fields
def build_filters(self, filters=None):
"""
Given a dictionary of filters, create the necessary ORM-level filters.
Keys should be resource fields, **NOT** model fields.
Valid values are either a list of Django filter types (i.e.
``['startswith', 'exact', 'lte']``), the ``ALL`` constant or the
``ALL_WITH_RELATIONS`` constant.
"""
# At the declarative level:
# filtering = {
# 'resource_field_name': ['exact', 'startswith', 'endswith', 'contains'],
# 'resource_field_name_2': ['exact', 'gt', 'gte', 'lt', 'lte', 'range'],
# 'resource_field_name_3': ALL,
# 'resource_field_name_4': ALL_WITH_RELATIONS,
# ...
# }
# Accepts the filters as a dict. None by default, meaning no filters.
if filters is None:
filters = {}
qs_filters = {}
for filter_expr, value in filters.items():
filter_bits = filter_expr.split(LOOKUP_SEP)
if not filter_bits[0] in self.fields:
# It's not a field we know about. Move along citizen.
continue
if not filter_bits[0] in self._meta.filtering:
raise InvalidFilterError("The '%s' field does not allow filtering." % filter_bits[0])
if filter_bits[-1] in QUERY_TERMS.keys():
filter_type = filter_bits.pop()
else:
filter_type = 'exact'
# Check to see if it's allowed lookup type.
if not self._meta.filtering[filter_bits[0]] in (ALL, ALL_WITH_RELATIONS):
# Must be an explicit whitelist.
if not filter_type in self._meta.filtering[filter_bits[0]]:
raise InvalidFilterError("'%s' is not an allowed filter on the '%s' field." % (filter_expr, filter_bits[0]))
# Check to see if it's a relational lookup and if that's allowed.
if len(filter_bits) > 1:
if not self._meta.filtering[filter_bits[0]] == ALL_WITH_RELATIONS:
raise InvalidFilterError("Lookups are not allowed more than one level deep on the '%s' field." % filter_bits[0])
if self.fields[filter_bits[0]].attribute is None:
raise InvalidFilterError("The '%s' field has no 'attribute' for searching with." % filter_bits[0])
if value in ['true', 'True', True]:
value = True
elif value in ['false', 'False', False]:
value = False
elif value in ('nil', 'none', 'None', None):
value = None
db_field_name = LOOKUP_SEP.join([self.fields[filter_bits[0]].attribute] + filter_bits[1:])
qs_filter = "%s%s%s" % (db_field_name, LOOKUP_SEP, filter_type)
qs_filters[qs_filter] = value
return dict_strip_unicode_keys(qs_filters)
def apply_sorting(self, obj_list, options=None):
"""
Given a dictionary of options, apply some ORM-level sorting to the
provided ``QuerySet``.
Looks for the ``sort_by`` key and handles either ascending (just the
field name) or descending (the field name with a ``-`` in front).
The field name should be the resource field, **NOT** model field.
"""
if options is None:
options = {}
if not 'sort_by' in options:
# Nothing to alter the sort order. Return what we've got.
return obj_list
order_by_args = []
if hasattr(options, 'getlist'):
sort_bits = options.getlist('sort_by')
else:
sort_bits = options.get('sort_by')
if not isinstance(sort_bits, (list, tuple)):
sort_bits = [sort_bits]
for sort_by in sort_bits:
sort_by_bits = sort_by.split(LOOKUP_SEP)
field_name = sort_by_bits[0]
order = ''
if sort_by_bits[0].startswith('-'):
field_name = sort_by_bits[0][1:]
order = '-'
if not field_name in self.fields:
# It's not a field we know about. Move along citizen.
raise InvalidSortError("No matching '%s' field for ordering on." % field_name)
if not field_name in self._meta.ordering:
raise InvalidSortError("The '%s' field does not allow ordering." % field_name)
if self.fields[field_name].attribute is None:
raise InvalidSortError("The '%s' field has no 'attribute' for ordering with." % field_name)
order_by_args.append("%s%s" % (order, LOOKUP_SEP.join([self.fields[field_name].attribute] + sort_by_bits[1:])))
return obj_list.order_by(*order_by_args)
def get_object_list(self, request):
"""
An ORM-specific implementation of ``get_object_list``.
Returns a queryset that may have been limited by authorization or other
overrides.
"""
base_object_list = self._meta.queryset
# Limit it as needed.
authed_object_list = self.apply_authorization_limits(request, base_object_list)
return authed_object_list
def obj_get_list(self, request=None, **kwargs):
"""
A ORM-specific implementation of ``obj_get_list``.
Takes an optional ``request`` object, whose ``GET`` dictionary can be
used to narrow the query.
"""
filters = None
if hasattr(request, 'GET'):
filters = request.GET
applicable_filters = self.build_filters(filters=filters)
try:
return self.get_object_list(request).filter(**applicable_filters)
except ValueError, e:
raise NotFound("Invalid resource lookup data provided (mismatched type).")
def obj_get(self, request=None, **kwargs):
"""
A ORM-specific implementation of ``obj_get``.
Takes optional ``kwargs``, which are used to narrow the query to find
the instance.
"""
try:
return self.get_object_list(request).get(**kwargs)
except ValueError, e:
raise NotFound("Invalid resource lookup data provided (mismatched type).")
def obj_create(self, bundle, request=None, **kwargs):
"""
A ORM-specific implementation of ``obj_create``.
"""
bundle.obj = self._meta.object_class()
for key, value in kwargs.items():
setattr(bundle.obj, key, value)
bundle = self.full_hydrate(bundle)
bundle.obj.save()
# Now pick up the M2M bits.
m2m_bundle = self.hydrate_m2m(bundle)
self.save_m2m(m2m_bundle)
return bundle
def obj_update(self, bundle, request=None, **kwargs):
"""
A ORM-specific implementation of ``obj_update``.
"""
if not bundle.obj or not bundle.obj.pk:
# Attempt to hydrate data from kwargs before doing a lookup for the object.
# This step is needed so certain values (like datetime) will pass model validation.
try:
bundle.obj = self.get_object_list(request).model()
bundle.data.update(kwargs)
bundle = self.full_hydrate(bundle)
lookup_kwargs = kwargs.copy()
lookup_kwargs.update(dict(
(k, getattr(bundle.obj, k))
for k in kwargs.keys()
if getattr(bundle.obj, k) is not None))
except:
# if there is trouble hydrating the data, fall back to just
# using kwargs by itself (usually it only contains a "pk" key
# and this will work fine).
lookup_kwargs = kwargs
try:
bundle.obj = self.get_object_list(request).get(**lookup_kwargs)
except ObjectDoesNotExist:
raise NotFound("A model instance matching the provided arguments could not be found.")
bundle = self.full_hydrate(bundle)
bundle.obj.save()
# Now pick up the M2M bits.
m2m_bundle = self.hydrate_m2m(bundle)
self.save_m2m(m2m_bundle)
return bundle
def obj_delete_list(self, request=None, **kwargs):
"""
A ORM-specific implementation of ``obj_delete_list``.
Takes optional ``kwargs``, which can be used to narrow the query.
"""
self.get_object_list(request).filter(**kwargs).delete()
def obj_delete(self, request=None, **kwargs):
"""
A ORM-specific implementation of ``obj_delete``.
Takes optional ``kwargs``, which are used to narrow the query to find
the instance.
"""
try:
obj = self.get_object_list(request).get(**kwargs)
except ObjectDoesNotExist:
raise NotFound("A model instance matching the provided arguments could not be found.")
obj.delete()
def rollback(self, bundles):
"""
A ORM-specific implementation of ``rollback``.
Given the list of bundles, delete all models pertaining to those
bundles.
"""
for bundle in bundles:
if bundle.obj and getattr(bundle.obj, 'pk', None):
bundle.obj.delete()
def save_m2m(self, bundle):
"""
Handles the saving of related M2M data.
Due to the way Django works, the M2M data must be handled after the
main instance, which is why this isn't a part of the main ``save`` bits.
Currently slightly inefficient in that it will clear out the whole
relation and recreate the related data as needed.
"""
for field_name, field_object in self.fields.items():
if not getattr(field_object, 'is_m2m', False):
continue
if not field_object.attribute:
continue
# Get the manager.
related_mngr = getattr(bundle.obj, field_object.attribute)
if hasattr(related_mngr, 'clear'):
# Clear it out, just to be safe.
related_mngr.clear()
related_objs = []
for related_bundle in bundle.data[field_name]:
related_bundle.obj.save()
related_objs.append(related_bundle.obj)
related_mngr.add(*related_objs)
def get_resource_uri(self, bundle_or_obj):
"""
Handles generating a resource URI for a single resource.
Uses the model's ``pk`` in order to create the URI.
"""
kwargs = {
'resource_name': self._meta.resource_name,
}
if isinstance(bundle_or_obj, Bundle):
kwargs['pk'] = bundle_or_obj.obj.pk
else:
kwargs['pk'] = bundle_or_obj.id
if self._meta.api_name is not None:
kwargs['api_name'] = self._meta.api_name
return self._build_reverse_url("api_dispatch_detail", kwargs=kwargs)
class NamespacedModelResource(ModelResource):
"""
A ModelResource subclass that respects Django namespaces.
"""
def _build_reverse_url(self, name, args=None, kwargs=None):
namespaced = "%s:%s" % (self._meta.urlconf_namespace, name)
return reverse(namespaced, args=args, kwargs=kwargs)
# Based off of ``piston.utils.coerce_put_post``. Similarly BSD-licensed.
# And no, the irony is not lost on me.
def convert_post_to_put(request):
"""
Force Django to process the PUT.
"""
if request.method == "PUT":
if hasattr(request, '_post'):
del request._post
del request._files
try:
request.method = "POST"
request._load_post_and_files()
request.method = "PUT"
except AttributeError:
request.META['REQUEST_METHOD'] = 'POST'
request._load_post_and_files()
request.META['REQUEST_METHOD'] = 'PUT'
request.PUT = request.POST
return request
| colinsullivan/bingo-board | bingo_board/tastypie/resources.py | Python | mit | 58,556 | 0.007121 |
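The `Meta` attributes consumed by `ResourceOptions` above (`queryset`, `resource_name`, `excludes`, `allowed_methods`, `filtering`, and so on) are what a concrete resource supplies. A minimal sketch of a `ModelResource` subclass against Django's built-in `User` model; the field choices are illustrative and assume the vendored `tastypie` package is importable under that name:

from django.contrib.auth.models import User
from tastypie.constants import ALL
from tastypie.resources import ModelResource

class UserResource(ModelResource):
    class Meta:
        # These attributes are read by ResourceOptions.__new__ and by
        # ModelDeclarativeMetaclass when the class is created.
        queryset = User.objects.all()
        resource_name = 'user'
        excludes = ['email', 'password', 'is_superuser']
        list_allowed_methods = ['get']
        detail_allowed_methods = ['get']
        filtering = {'username': ALL}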
from django.urls import re_path
from systextil.views import apoio_index
from systextil.views.table import (
deposito,
colecao,
estagio,
periodo_confeccao,
unidade,
)
urlpatterns = [
re_path(r'^colecao/$', colecao.view, name='colecao'),
re_path(r'^deposito/$', deposito.deposito, name='deposito'),
re_path(r'^estagio/$', estagio.view, name='estagio'),
re_path(r'^periodo_confeccao/$', periodo_confeccao.view, name='periodo_confeccao'),
re_path(r'^unidade/$', unidade.view, name='unidade'),
]
| anselmobd/fo2 | src/systextil/urls/table.py | Python | mit | 540 | 0.001852 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RFutileOptions(RPackage):
"""A scoped options management framework"""
homepage = "https://cran.rstudio.com/web/packages/futile.options/index.html"
url = "https://cran.rstudio.com/src/contrib/futile.options_1.0.0.tar.gz"
list_url = "https://cran.rstudio.com/src/contrib/Archive/futile.options"
version('1.0.0', '8fd845774bbce56f41f7c43c3b4c13ba')
| tmerrick1/spack | var/spack/repos/builtin/packages/r-futile-options/package.py | Python | lgpl-2.1 | 1,636 | 0.001834 |
import numpy as np
import pandas as pd
from bokeh import mpl
ts = pd.Series(np.random.randn(1000), index=pd.date_range('1/1/2000', periods=1000))
ts = ts.cumsum()
df = pd.DataFrame(np.random.randn(1000, 4), index=ts.index, columns=list('ABCD'))
df = df.cumsum()
df.plot(legend=False)
mpl.to_bokeh(name="dataframe")
| the13fools/Bokeh_Examples | pandas/dataframe.py | Python | bsd-3-clause | 318 | 0.006289 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-04 22:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('domain_api', '0009_remove_topleveldomain_slug'),
]
operations = [
migrations.AddField(
model_name='topleveldomain',
name='slug',
field=models.CharField(max_length=100, null=True),
),
]
| heytrav/drs-project | domain_api/migrations/0010_topleveldomain_slug.py | Python | mit | 478 | 0 |
'''
Created on Nov 21, 2016
@author: julimatt
'''
from django import forms
| zibawa/zibawa | simulator/forms.py | Python | gpl-3.0 | 76 | 0 |
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
ext_modules = [Extension("_nbt", ["_nbt.pyx"])]
import numpy
setup(
name = 'NBT library (Cython implementation)',
cmdclass = {'build_ext': build_ext},
ext_modules = ext_modules,
include_dirs = numpy.get_include()
)
| codewarrior0/pymclevel | setup_nbt.py | Python | isc | 344 | 0.02907 |
# -*- coding: utf-8 -*-
#
# Copyright 2010 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import os
import six
from lxml import etree
__all__ = ('FileExistsInProjectError', 'FileNotInProjectError', 'ProjectStore')
class FileExistsInProjectError(Exception):
pass
class FileNotInProjectError(Exception):
pass
class ProjectStore(object):
"""Basic project file container."""
# INITIALIZERS #
def __init__(self):
self._files = {}
self._sourcefiles = []
self._targetfiles = []
self._transfiles = []
self.settings = {}
self.convert_map = {}
# The map above maps each input file (key) to its output file and the
# template used (a 2-tuple). All values are project file names.
# e.g. convert_map = {
# 'sources/doc.odt': ('trans/doc.odt.xlf', None),
# 'trans/doc.odt.xlf': ('targets/doc.odt', 'sources/doc.odt')
#}
# The following dict groups together sets of mappings from a file
# "type" string ("src", "tgt" or "trans") to various other values
# or objects.
self.TYPE_INFO = {
# type => prefix for new files
'f_prefix': {
'src': 'sources/',
'tgt': 'targets/',
'trans': 'trans/',
},
# type => list containing filenames for that type
'lists': {
'src': self._sourcefiles,
'tgt': self._targetfiles,
'trans': self._transfiles,
},
# type => next type in process: src => trans => tgt
'next_type': {
'src': 'trans',
'trans': 'tgt',
'tgt': None,
},
# type => name of the sub-section in the settings file/dict
'settings': {
'src': 'sources',
'tgt': 'targets',
'trans': 'transfiles',
}
}
def __del__(self):
try:
self.close()
except Exception:
pass
# ACCESSORS #
def _get_sourcefiles(self):
"""Read-only access to ``self._sourcefiles``."""
return tuple(self._sourcefiles)
sourcefiles = property(_get_sourcefiles)
def _get_targetfiles(self):
"""Read-only access to ``self._targetfiles``."""
return tuple(self._targetfiles)
targetfiles = property(_get_targetfiles)
def _get_transfiles(self):
"""Read-only access to ``self._transfiles``."""
return tuple(self._transfiles)
transfiles = property(_get_transfiles)
# SPECIAL METHODS #
def __contains__(self, lhs):
"""@returns ``True`` if ``lhs`` is a file name or file object in the project store."""
return (lhs in self._sourcefiles or
lhs in self._targetfiles or
lhs in self._transfiles or
lhs in self._files or
lhs in self._files.values())
# METHODS #
def append_file(self, afile, fname, ftype='trans', delete_orig=False):
"""Append the given file to the project with the given filename, marked
to be of type ``ftype`` ('src', 'trans', 'tgt').
:type delete_orig: bool
:param delete_orig: Whether or not the original (given) file should be
deleted after being appended. This is set to
``True`` by
:meth:`~translate.storage.project.convert_forward`.
Not used in this class.
"""
if ftype not in self.TYPE_INFO['f_prefix']:
raise ValueError('Invalid file type: %s' % (ftype))
if isinstance(afile, six.string_types) and os.path.isfile(afile) and not fname:
# Try and use afile as the file name
fname, afile = afile, open(afile)
# Check if we can get a real file name
realfname = fname
if realfname is None or not os.path.isfile(realfname):
realfname = getattr(afile, 'name', None)
if realfname is None or not os.path.isfile(realfname):
realfname = getattr(afile, 'filename', None)
if not realfname or not os.path.isfile(realfname):
realfname = None
# Try to get the file name from the file object, if it was not given:
if not fname:
fname = getattr(afile, 'name', None)
if not fname:
fname = getattr(afile, 'filename', None)
fname = self._fix_type_filename(ftype, fname)
if not fname:
raise ValueError('Could not deduce file name and none given')
if fname in self._files:
raise FileExistsInProjectError(fname)
if realfname is not None and os.path.isfile(realfname):
self._files[fname] = realfname
else:
self._files[fname] = afile
self.TYPE_INFO['lists'][ftype].append(fname)
return afile, fname
def append_sourcefile(self, afile, fname=None):
return self.append_file(afile, fname, ftype='src')
def append_targetfile(self, afile, fname=None):
return self.append_file(afile, fname, ftype='tgt')
def append_transfile(self, afile, fname=None):
return self.append_file(afile, fname, ftype='trans')
def remove_file(self, fname, ftype=None):
"""Remove the file with the given project name from the project. If
the file type ('src', 'trans' or 'tgt') is not given, it is guessed.
"""
if fname not in self._files:
raise FileNotInProjectError(fname)
if not ftype:
# Guess file type (source/trans/target)
for ft, prefix in self.TYPE_INFO['f_prefix'].items():
if fname.startswith(prefix):
ftype = ft
break
self.TYPE_INFO['lists'][ftype].remove(fname)
if self._files[fname] and hasattr(self._files[fname], 'close'):
self._files[fname].close()
del self._files[fname]
def remove_sourcefile(self, fname):
self.remove_file(fname, ftype='src')
def remove_targetfile(self, fname):
self.remove_file(fname, ftype='tgt')
def remove_transfile(self, fname):
self.remove_file(fname, ftype='trans')
def close(self):
self.save()
def get_file(self, fname, mode='rb'):
"""Retrieve the file with the given name from the project store.
The file is looked up in the ``self._files`` dictionary. The values
in this dictionary may be ``None``, to indicate that the file is not
cacheable and needs to be retrieved in a special way. This special
way must be defined in this method of sub-classes. The value may
also be a string, which indicates that it is a real file accessible
via ``open``.
:type mode: str
:param mode: The mode in which to re-open the file (if it is closed).
"""
if fname not in self._files:
raise FileNotInProjectError(fname)
rfile = self._files[fname]
if isinstance(rfile, six.string_types):
rfile = open(rfile, 'rb')
# Check that the file is actually open
if getattr(rfile, 'closed', False):
rfname = fname
if not os.path.isfile(rfname):
rfname = getattr(rfile, 'name', None)
if not rfname or not os.path.isfile(rfname):
rfname = getattr(rfile, 'filename', None)
if not rfname or not os.path.isfile(rfname):
raise IOError('Could not locate file: %s (%s)' % (rfile, fname))
rfile = open(rfname, mode)
self._files[fname] = rfile
return rfile
def get_filename_type(self, fname):
"""Get the type of file ('src', 'trans', 'tgt') with the given name."""
for ftype in self.TYPE_INFO['lists']:
if fname in self.TYPE_INFO['lists'][ftype]:
return ftype
raise FileNotInProjectError(fname)
def get_proj_filename(self, realfname):
"""Try and find a project file name for the given real file name."""
for fname in self._files:
if fname == realfname or self._files[fname] == realfname:
return fname
raise ValueError('Real file not in project store: %s' % (realfname))
def load(self, *args, **kwargs):
"""Load the project in some way. Undefined for this (base) class."""
pass
def save(self, filename=None, *args, **kwargs):
"""Save the project in some way. Undefined for this (base) class."""
pass
def update_file(self, pfname, infile):
"""Remove the project file with name ``pfname`` and add the contents
from ``infile`` to the project under the same file name.
:returns: the results from :meth:`ProjectStore.append_file`.
"""
ftype = self.get_filename_type(pfname)
self.remove_file(pfname)
self.append_file(infile, pfname, ftype)
def _fix_type_filename(self, ftype, fname):
"""Strip the path from the filename and prepend the correct prefix."""
path, fname = os.path.split(fname)
return self.TYPE_INFO['f_prefix'][ftype] + fname
def _generate_settings(self):
"""@returns A XML string that represents the current settings."""
xml = etree.Element('translationproject')
# Add file names to settings XML
if self._sourcefiles:
sources_el = etree.Element('sources')
for fname in self._sourcefiles:
src_el = etree.Element('filename')
src_el.text = fname
sources_el.append(src_el)
xml.append(sources_el)
if self._transfiles:
transfiles_el = etree.Element('transfiles')
for fname in self._transfiles:
trans_el = etree.Element('filename')
trans_el.text = fname
transfiles_el.append(trans_el)
xml.append(transfiles_el)
if self._targetfiles:
target_el = etree.Element('targets')
for fname in self._targetfiles:
tgt_el = etree.Element('filename')
tgt_el.text = fname
target_el.append(tgt_el)
xml.append(target_el)
# Add conversion mappings
if self.convert_map:
conversions_el = etree.Element('conversions')
for in_fname, (out_fname, templ_fname) in six.iteritems(self.convert_map):
if in_fname not in self._files or out_fname not in self._files:
continue
conv_el = etree.Element('conv')
input_el = etree.Element('input')
input_el.text = in_fname
conv_el.append(input_el)
output_el = etree.Element('output')
output_el.text = out_fname
conv_el.append(output_el)
if templ_fname:
templ_el = etree.Element('template')
templ_el.text = templ_fname
conv_el.append(templ_el)
conversions_el.append(conv_el)
xml.append(conversions_el)
# Add options to settings
if 'options' in self.settings:
options_el = etree.Element('options')
for option, value in self.settings['options'].items():
opt_el = etree.Element('option')
opt_el.attrib['name'] = option
opt_el.text = value
options_el.append(opt_el)
xml.append(options_el)
return etree.tostring(xml, pretty_print=True)
def _load_settings(self, settingsxml):
"""Load project settings from the given XML string. ``settingsxml`` is
parsed into a DOM tree (``lxml.etree.fromstring``) which is then
inspected.
"""
settings = {}
xml = etree.fromstring(settingsxml)
# Load files in project
for section in ('sources', 'targets', 'transfiles'):
groupnode = xml.find(section)
if groupnode is None:
continue
settings[section] = []
for fnode in groupnode.getchildren():
settings[section].append(fnode.text)
conversions_el = xml.find('conversions')
if conversions_el is not None:
self.convert_map = {}
for conv_el in conversions_el.iterchildren():
in_fname, out_fname, templ_fname = None, None, None
for child_el in conv_el.iterchildren():
if child_el.tag == 'input':
in_fname = child_el.text
elif child_el.tag == 'output':
out_fname = child_el.text
elif child_el.tag == 'template':
templ_fname = child_el.text
# Make sure that in_fname and out_fname exist in
# settings['sources'], settings['targets'] or
# settings['transfiles']
in_found, out_found, templ_found = False, False, False
for section in ('sources', 'transfiles', 'targets'):
if section not in settings:
continue
if in_fname in settings[section]:
in_found = True
if out_fname in settings[section]:
out_found = True
if templ_fname and templ_fname in settings[section]:
templ_found = True
if in_found and out_found and (not templ_fname or templ_found):
self.convert_map[in_fname] = (out_fname, templ_fname)
# Load options
groupnode = xml.find('options')
if groupnode is not None:
settings['options'] = {}
for opt in groupnode.iterchildren():
settings['options'][opt.attrib['name']] = opt.text
self.settings = settings
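# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): appending an
# in-memory file to a ProjectStore and reading it back. The project file
# name "example.po" is an assumption chosen purely for illustration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import io
    store = ProjectStore()
    # A file object with no usable name attribute needs an explicit file name.
    afile, pfname = store.append_transfile(io.BytesIO(b'msgid ""\n'), fname='example.po')
    print(pfname)                            # 'trans/example.po'
    print(store.get_filename_type(pfname))   # 'trans'
    print(store.get_file(pfname).read())     # b'msgid ""\n'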
|
diorcety/translate
|
translate/storage/projstore.py
|
Python
|
gpl-2.0
| 14,767
| 0.000339
|
from myhdl import *
from str_builder import StrBuilder
import os
import sys
class GenWrapperFile(object):
'''|
| This class is used to generate a python wrapper of a verilog design
|________'''
def __init__(self):
self.module_name = ''
self.interface = [] # keeps the order of the declarations
self.parameters = []
self.inputs = []
self.outputs = []
def generateWrapperFile(self):
'''|
| Generate <module_name_wrapper>.py file
|________'''
print "\nGenerating .py wrapper file."
s = StrBuilder()
self.genWrapperInterface(s)
s += '# Need this in order to work...\n'
# We assume that the clock is 'clk'!!!!!
s += '@always(clk.posedge)\n'
s += 'def pass_thru():\n'
s += s.indent() + 'pass\n\n'
s.dedent()
self.genTheWrapper(s)
self.genConvertFunc(s)
filename = self.module_name + '_wrp.py'
s.write(filename)
def genWrapperInterface(self, s):
'''|
| Generate the interfaces of the wrapper file
|________'''
s += 'from myhdl import *\n\n'
s += 'def ' + self.module_name + '_wrapper(\n'
s.indent(5)
for i in self.interface: # to preserve the order
if i['type'] != 'parameter':
s += i['name'] + ',\n'
s+= 'INST_NAME = "' + self.module_name.upper() + '",\n'
for p in self.parameters:
s += p['name'] + ' = ' + p['value'] + ',\n'
s -= 2
s += s.noIndent() + '):\n\n'
s.dedent(3)
def genTheWrapper(self, s, py_name=None):
'''|
| Generate the wrapper
|________'''
s += '#---------------------------------------------#\n'
s += '# Define the interface to the verilog ip core #\n'
s += '#---------------------------------------------#\n'
str_name = self.module_name if py_name == None else py_name
s += str_name + '_wrp.verilog_code = \\' + '\n'
if self.parameters != []:
s += '"""' + self.module_name + '#(\\n""" + \\' + '\n'
for p in self.parameters:
s += '""" .' + p["name"] + '($' + p["name"] + '),\\n""" + \\' + '\n'
s = s-11 + (s.noIndent() + '\\n""" + \\' + '\n')
s += '""" ) $INST_NAME (\\n""" + \\' + '\n'
else:
s += '"""' + self.module_name + ' $INST_NAME (\\n""" + \\' + '\n'
for i in self.interface:
if i["type"] != "parameter":
s += '""" .' + i["name"] + '($' + i["name"] + '),\\n""" + \\' + '\n'
s = s-11 + (s.noIndent() + '\\n""" + \\' + '\n')
s += '""");"""\n\n'
s += '#-------------------------------------------------------#\n'
s += '# output, needed when converting the wrapper to verilog #\n'
s += '#-------------------------------------------------------#\n'
for o in self.outputs:
s += o["name"] + '.driven = "wire"\n'
s += '\n'
s += 'return pass_thru\n\n\n'
def genConvertFunc(self, s):
'''|
| Generate function convert()
|________'''
s += s.noIndent() + 'def convert():\n\n'
for i in self.parameters:
s += i['name'] + ' = ' + i['value'] + '\n'
s += '\n'
# Declare signals
for i in self.interface:
if i['type'] != 'parameter':
x = i["size"]
stype = 'bool(0)'
if x.startswith('['):
stype = 'intbv(0)'
x = x.replace(":0", "+1:")
s += i['name'] + '= Signal(' + stype + x + ')\n'
s += '\n'
s += 'toVerilog(' + self.module_name + '_wrapper,\n'
s.indent(2)
for i in self.interface:
if i['type'] != 'parameter':
s += i['name'] + ' = ' + i['name'] + ',\n'
for p in self.parameters:
s += p['name'] + ' = ' + p['name'] + ',\n'
s = s-2 + (s.noIndent() + ' )\n\n\n')
s.dedent(4)
s += 'if __name__ == "__main__":\n'
s += s.indent() + 'convert()\n'
def initialize(self, filename):
'''|
| Initialize the GenWrapperFile object
|________'''
with open(filename) as f:
for line_number, line in enumerate(f):
w_list = line.split()
if w_list != []:
if w_list[0]=='module':
self.module_name = w_list[1]
elif w_list[0] == 'parameter':
name = w_list[1].replace(',','')
value = w_list[3].replace(',','')
self.parameters.append( {'name':name, 'value':value})
self.interface.append( {'name':name, 'value':value, 'type':w_list[0]})
elif w_list[0] == 'input' or w_list[0] == 'output':
name = w_list[3] if w_list[2].startswith('[') else w_list[2]
size = w_list[2] if w_list[2].startswith('[') else ''
if w_list[0] == 'input':
self.inputs.append( {'name':name.replace(',',''), 'size':size})
elif w_list[0] == 'output':
self.outputs.append({'name':name.replace(',',''), 'size':size})
self.interface.append( {'name':name.replace(',',''), 'size':size, 'type':w_list[0]})
elif w_list[0] == ');':
break
def main( args ):
gwf = GenWrapperFile()
gwf.initialize(args.file_name)
gwf.generateWrapperFile()
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Create a myhdl wrapper file.')
parser.add_argument('-f', '--file', dest='file_name', default="",
help='top-level verilog (.v) file')
args = parser.parse_args()
main( args )
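# Hedged usage note (not part of the original file): based on the argparse
# options above, a wrapper for a hypothetical top-level verilog file would
# typically be generated with
#
#   python ip_wrapper_gen.py -f my_core.v
#
# which parses the module ports/parameters and writes <module_name>_wrp.py
# via StrBuilder.write().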
|
hnikolov/pihdf
|
pihdf/printers/ip_wrapper_gen.py
|
Python
|
mit
| 6,357
| 0.016045
|
# encoding: utf8
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('letter_to_editor', '0004_company_newspaper_webcache_wikipediapage'),
]
operations = [
migrations.AddField(
model_name='newspaper',
name='sister_newspapers',
field=models.ForeignKey(to='letter_to_editor.Newspaper', to_field='newspaper_name'),
preserve_default=True,
),
]
|
h4ck3rm1k3/letter-to-editor
|
newspaper/letter_to_editor/migrations/0005_newspaper_sister_newspapers.py
|
Python
|
agpl-3.0
| 478
| 0.002092
|
import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import pytest
from scipy.spatial.distance import cdist
from sklearn.neighbors import DistanceMetric
from sklearn.neighbors import BallTree
from sklearn.utils import check_random_state
from sklearn.utils._testing import create_memmap_backed_data
from sklearn.utils.fixes import sp_version, parse_version
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1.0 / p)
rng = check_random_state(0)
d = 4
n1 = 20
n2 = 25
X1 = rng.random_sample((n1, d)).astype("float64", copy=False)
X2 = rng.random_sample((n2, d)).astype("float64", copy=False)
[X1_mmap, X2_mmap] = create_memmap_backed_data([X1, X2])
# make boolean arrays: ones and zeros
X1_bool = X1.round(0)
X2_bool = X2.round(0)
[X1_bool_mmap, X2_bool_mmap] = create_memmap_backed_data([X1_bool, X2_bool])
V = rng.random_sample((d, d))
VI = np.dot(V, V.T)
BOOL_METRICS = [
"matching",
"jaccard",
"dice",
"kulsinski",
"rogerstanimoto",
"russellrao",
"sokalmichener",
"sokalsneath",
]
METRICS_DEFAULT_PARAMS = {
"euclidean": {},
"cityblock": {},
"minkowski": dict(p=(1, 1.5, 2, 3)),
"chebyshev": {},
"seuclidean": dict(V=(rng.random_sample(d),)),
"wminkowski": dict(p=(1, 1.5, 3), w=(rng.random_sample(d),)),
"mahalanobis": dict(VI=(VI,)),
"hamming": {},
"canberra": {},
"braycurtis": {},
}
@pytest.mark.parametrize("metric", METRICS_DEFAULT_PARAMS)
@pytest.mark.parametrize("X1, X2", [(X1, X2), (X1_mmap, X2_mmap)])
def test_cdist(metric, X1, X2):
argdict = METRICS_DEFAULT_PARAMS[metric]
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
if metric == "mahalanobis":
# See: https://github.com/scipy/scipy/issues/13861
pytest.xfail("scipy#13861: cdist with 'mahalanobis' fails onmemmap data")
elif metric == "wminkowski":
if sp_version >= parse_version("1.8.0"):
pytest.skip("wminkowski will be removed in SciPy 1.8.0")
# wminkowski is deprecated in SciPy 1.6.0 and removed in 1.8.0
ExceptionToAssert = None
if sp_version >= parse_version("1.6.0"):
ExceptionToAssert = DeprecationWarning
with pytest.warns(ExceptionToAssert):
D_true = cdist(X1, X2, metric, **kwargs)
else:
D_true = cdist(X1, X2, metric, **kwargs)
check_cdist(metric, kwargs, D_true)
@pytest.mark.parametrize("metric", BOOL_METRICS)
@pytest.mark.parametrize(
"X1_bool, X2_bool", [(X1_bool, X2_bool), (X1_bool_mmap, X2_bool_mmap)]
)
def test_cdist_bool_metric(metric, X1_bool, X2_bool):
D_true = cdist(X1_bool, X2_bool, metric)
check_cdist_bool(metric, D_true)
def check_cdist(metric, kwargs, D_true):
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(X1, X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(X1_bool, X2_bool)
assert_array_almost_equal(D12, D_true)
@pytest.mark.parametrize("metric", METRICS_DEFAULT_PARAMS)
@pytest.mark.parametrize("X1, X2", [(X1, X2), (X1_mmap, X2_mmap)])
def test_pdist(metric, X1, X2):
argdict = METRICS_DEFAULT_PARAMS[metric]
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
if metric == "mahalanobis":
# See: https://github.com/scipy/scipy/issues/13861
pytest.xfail("scipy#13861: pdist with 'mahalanobis' fails onmemmap data")
elif metric == "wminkowski":
if sp_version >= parse_version("1.8.0"):
pytest.skip("wminkowski will be removed in SciPy 1.8.0")
# wminkowski is deprecated in SciPy 1.6.0 and removed in 1.8.0
ExceptionToAssert = None
if sp_version >= parse_version("1.6.0"):
ExceptionToAssert = DeprecationWarning
with pytest.warns(ExceptionToAssert):
D_true = cdist(X1, X1, metric, **kwargs)
else:
D_true = cdist(X1, X1, metric, **kwargs)
check_pdist(metric, kwargs, D_true)
@pytest.mark.parametrize("metric", BOOL_METRICS)
@pytest.mark.parametrize("X1_bool", [X1_bool, X1_bool_mmap])
def test_pdist_bool_metrics(metric, X1_bool):
D_true = cdist(X1_bool, X1_bool, metric)
check_pdist_bool(metric, D_true)
def check_pdist(metric, kwargs, D_true):
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(X1_bool)
# Based on https://github.com/scipy/scipy/pull/7373
# When comparing two all-zero vectors, scipy>=1.2.0 jaccard metric
# was changed to return 0, instead of nan.
if metric == "jaccard" and sp_version < parse_version("1.2.0"):
D_true[np.isnan(D_true)] = 0
assert_array_almost_equal(D12, D_true)
@pytest.mark.parametrize("metric", METRICS_DEFAULT_PARAMS)
def test_pickle(metric):
argdict = METRICS_DEFAULT_PARAMS[metric]
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
check_pickle(metric, kwargs)
@pytest.mark.parametrize("metric", BOOL_METRICS)
@pytest.mark.parametrize("X1_bool", [X1_bool, X1_bool_mmap])
def test_pickle_bool_metrics(metric, X1_bool):
dm = DistanceMetric.get_metric(metric)
D1 = dm.pairwise(X1_bool)
dm2 = pickle.loads(pickle.dumps(dm))
D2 = dm2.pairwise(X1_bool)
assert_array_almost_equal(D1, D2)
def check_pickle(metric, kwargs):
dm = DistanceMetric.get_metric(metric, **kwargs)
D1 = dm.pairwise(X1)
dm2 = pickle.loads(pickle.dumps(dm))
D2 = dm2.pairwise(X1)
assert_array_almost_equal(D1, D2)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(
np.sqrt(
np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) * np.sin(0.5 * (x1[1] - x2[1])) ** 2
)
)
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1), np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check that DistanceMetric objects initialized with a callable metric
# and with a predefined metric are both picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
def test_bad_pyfunc_metric():
def wrong_distance(x, y):
return "1"
X = np.ones((5, 2))
msg = "Custom distance function must accept two vectors"
with pytest.raises(TypeError, match=msg):
BallTree(X, metric=wrong_distance)
def test_input_data_size():
# Regression test for #6288
# Previously, a metric requiring a particular input dimension would fail
def custom_metric(x, y):
assert x.shape[0] == 3
return np.sum((x - y) ** 2)
rng = check_random_state(0)
X = rng.rand(10, 3)
pyfunc = DistanceMetric.get_metric("pyfunc", func=custom_metric)
eucl = DistanceMetric.get_metric("euclidean")
assert_array_almost_equal(pyfunc.pairwise(X), eucl.pairwise(X) ** 2)
|
shyamalschandra/scikit-learn
|
sklearn/neighbors/tests/test_dist_metrics.py
|
Python
|
bsd-3-clause
| 8,002
| 0.0005
|
#!/usr/bin/python3
import exhibition
import inputdevice
import data
from levels import level1
import os
import pygame
import logging
log = logging.getLogger(__name__)
class Engine:
""" Main class responsible for running the game.
Controls game setup and runs the main loop.
Passes input to game and handles the event queue. """
def __init__(self):
""" Creates the display surface and loads the game assets. """
pygame.init()
log.info("Initializing display surface at {}x{}".format(
data.SCREEN_RESOLUTION[0], data.SCREEN_RESOLUTION[1]))
self.screen = pygame.display.set_mode(data.SCREEN_RESOLUTION)
pygame.display.set_caption("digital heist")
# load image resources
exhibition.images(os.path.join(data.DATA_DIR, "images"))
exhibition.optimize()
self.level = level1.Level1()
self.input = inputdevice.KeyboardInput()
def run(self):
""" Starts the game and runs the main game loop. """
self.main_loop()
pygame.quit()
def main_loop(self):
clock = pygame.time.Clock()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
if event.type == pygame.KEYUP:
if event.key == pygame.K_ESCAPE:
return
# game update
self.input.update()
self.level.process_input(self.input)
complete = self.level.update()
self.screen.fill((0, 0, 0))
self.level.render()
pygame.display.flip()
# noinspection PyUnusedLocal
ms = clock.tick(60)
if complete:
break
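# Hedged usage sketch (not part of the original file): the engine is
# presumably started from a small launcher along the lines of
#
#   if __name__ == "__main__":
#       Engine().run()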
|
kjwilcox/digital_heist
|
src/engine.py
|
Python
|
gpl-2.0
| 1,911
| 0.005756
|
# Copyright 2018 The Exoplanet ML Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for computing evaluation metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def _metric_variable(name, shape, dtype):
"""Creates a Variable in LOCAL_VARIABLES and METRIC_VARIABLES collections."""
return tf.Variable(
initial_value=tf.zeros(shape, dtype),
trainable=False,
collections=[tf.GraphKeys.LOCAL_VARIABLES, tf.GraphKeys.METRIC_VARIABLES],
name=name)
def _build_metrics(labels, predictions, weights, batch_losses, output_dim=1):
"""Builds TensorFlow operations to compute model evaluation metrics.
Args:
labels: Tensor with shape [batch_size].
predictions: Tensor with shape [batch_size, output_dim].
weights: Tensor with shape [batch_size].
batch_losses: Tensor with shape [batch_size].
output_dim: Dimension of model output
Returns:
A dictionary {metric_name: (metric_value, update_op)}.
"""
# Compute the predicted labels.
assert len(predictions.shape) == 2
binary_classification = output_dim == 1
if binary_classification:
assert predictions.shape[1] == 1
predictions = tf.squeeze(predictions, axis=[1])
predicted_labels = tf.cast(
tf.greater(predictions, 0.5), tf.int32, name="predicted_labels")
else:
predicted_labels = tf.argmax(
predictions, 1, name="predicted_labels", output_type=tf.int32)
metrics = {}
with tf.name_scope("metrics"):
# Total number of examples.
num_examples = _metric_variable("num_examples", [], tf.float32)
update_num_examples = tf.assign_add(num_examples, tf.reduce_sum(weights))
metrics["num_examples"] = (num_examples.read_value(), update_num_examples)
# Accuracy metrics.
num_correct = _metric_variable("num_correct", [], tf.float32)
is_correct = tf.equal(labels, predicted_labels)
weighted_is_correct = weights * tf.cast(is_correct, tf.float32)
update_num_correct = tf.assign_add(num_correct,
tf.reduce_sum(weighted_is_correct))
metrics["accuracy/num_correct"] = (num_correct.read_value(),
update_num_correct)
accuracy = tf.div(num_correct, num_examples, name="accuracy")
metrics["accuracy/accuracy"] = (accuracy, tf.no_op())
# Weighted cross-entropy loss.
metrics["losses/weighted_cross_entropy"] = tf.metrics.mean(
batch_losses, weights=weights, name="cross_entropy_loss")
def _count_condition(name, labels_value, predicted_value):
"""Creates a counter for given values of predictions and labels."""
count = _metric_variable(name, [], tf.float32)
is_equal = tf.logical_and(
tf.equal(labels, labels_value),
tf.equal(predicted_labels, predicted_value))
weighted_is_equal = weights * tf.cast(is_equal, tf.float32)
update_op = tf.assign_add(count, tf.reduce_sum(weighted_is_equal))
return count.read_value(), update_op
# Confusion matrix metrics.
num_labels = 2 if binary_classification else output_dim
for gold_label in range(num_labels):
for pred_label in range(num_labels):
metric_name = "confusion_matrix/label_{}_pred_{}".format(
gold_label, pred_label)
metrics[metric_name] = _count_condition(
metric_name, labels_value=gold_label, predicted_value=pred_label)
# Possibly create AUC metric for binary classification.
if binary_classification:
labels = tf.cast(labels, dtype=tf.bool)
metrics["auc"] = tf.metrics.auc(
labels, predictions, weights=weights, num_thresholds=1000)
return metrics
def create_metric_fn(model):
"""Creates a tuple (metric_fn, metric_fn_inputs).
This function is primarily used for creating a TPUEstimator.
The result of calling metric_fn(**metric_fn_inputs) is a dictionary
{metric_name: (metric_value, update_op)}.
Args:
model: Instance of AstroModel.
Returns:
A tuple (metric_fn, metric_fn_inputs).
"""
weights = model.weights
if weights is None:
weights = tf.ones_like(model.labels, dtype=tf.float32)
metric_fn_inputs = {
"labels": model.labels,
"predictions": model.predictions,
"weights": weights,
"batch_losses": model.batch_losses,
}
def metric_fn(labels, predictions, weights, batch_losses):
return _build_metrics(
labels,
predictions,
weights,
batch_losses,
output_dim=model.hparams.output_dim)
return metric_fn, metric_fn_inputs
def create_metrics(model):
"""Creates a dictionary {metric_name: (metric_value, update_op)}.
This function is primarily used for creating an Estimator.
Args:
model: Instance of AstroModel.
Returns:
A dictionary {metric_name: (metric_value, update_op)}.
"""
metric_fn, metric_fn_inputs = create_metric_fn(model)
return metric_fn(**metric_fn_inputs)
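# -----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): evaluating the metric
# ops for a stand-in model. "FakeModel"/"FakeHParams" and the constant tensors
# below are assumptions for illustration only; a real AstroModel supplies the
# same attributes.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
  import collections

  FakeHParams = collections.namedtuple("FakeHParams", ["output_dim"])
  FakeModel = collections.namedtuple(
      "FakeModel", ["labels", "predictions", "weights", "batch_losses", "hparams"])

  model = FakeModel(
      labels=tf.constant([0, 1, 1, 0], dtype=tf.int32),
      predictions=tf.constant([[0.1], [0.8], [0.4], [0.3]], dtype=tf.float32),
      weights=None,  # create_metric_fn substitutes all-ones weights.
      batch_losses=tf.constant([0.1, 0.2, 0.9, 0.3], dtype=tf.float32),
      hparams=FakeHParams(output_dim=1))

  metrics = create_metrics(model)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run([update_op for _, update_op in metrics.values()])
    print(sess.run({name: value for name, (value, _) in metrics.items()}))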
|
google-research/exoplanet-ml
|
exoplanet-ml/astronet/ops/metrics.py
|
Python
|
apache-2.0
| 5,503
| 0.005452
|
weakened, flagged = set(), set()
infected = {(i,j) for i,a in enumerate(open('22.in')) for j,b in enumerate(a.strip('\n')) if b=='#'}
s = len(open('22.in').readlines())/2
p = (s,s)
total = 0
d = (-1,0)
for _ in xrange(10**7):
if p in infected:
d = d[1], -1*d[0]
infected.remove(p)
flagged.add(p)
elif p in weakened:
total += 1
weakened.remove(p)
infected.add(p)
elif p in flagged:
d = -1*d[0], -1*d[1]
flagged.remove(p)
else:
d = -1*d[1], d[0]
weakened.add(p)
p = p[0]+d[0], p[1]+d[1]
print total
|
pedrotari7/advent_of_code
|
py/2017/22B.py
|
Python
|
mit
| 600
| 0.013333
|
from oscarbluelight.basket_utils import BluelightLineOfferConsumer as LineOfferConsumer
__all__ = [
"LineOfferConsumer",
]
|
thelabnyc/django-oscar-wfrs
|
sandbox/basket/utils.py
|
Python
|
isc
| 128
| 0.007813
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Feedback'
db.create_table('feedback_form_feedback', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
('body', self.gf('django.db.models.fields.TextField')()),
('sent_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal('feedback_form', ['Feedback'])
def backwards(self, orm):
# Deleting model 'Feedback'
db.delete_table('feedback_form_feedback')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'feedback_form.feedback': {
'Meta': {'object_name': 'Feedback'},
'body': ('django.db.models.fields.TextField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'sent_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
}
}
complete_apps = ['feedback_form']
|
bashu/django-feedback-form
|
feedback_form/south_migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 4,849
| 0.008043
|
# -*- coding: utf-8 -*-
import re
from mobify.source import MobifySource
class OReillySource(MobifySource):
HEADER = u"""
<h1>{title}</h1>
<p><strong>{lead}</strong></p>
<p><small>{author} @ oreilly.com</small><br></p>
"""
@staticmethod
def is_my_url(url):
# https://www.oreilly.com/ideas/the-evolution-of-devops
return 'oreilly.com/ideas/' in url
def get_inner_html(self):
article = self.xpath('//*[@itemprop="articleBody"]')
xpaths = [
'aside',
'div',
'figure[@class]',
]
# clean up the HTML
article = self.remove_nodes(article, xpaths)
html = self.get_node_html(article)
return html
def get_html(self):
# add a title and a footer
return '\n'.join([
self.HEADER.format(title=self.get_title(), author=self.get_author(), lead=self.get_lead()).strip(),
self.get_inner_html()
]).strip()
def get_title(self):
# <meta property="og:title" content="Radio w Poznaniu rozpoczęło nadawanie 90 lat temu" />
return self.get_node('//meta[@property="og:title"]', attr='content').strip()
def get_lead(self):
# <meta property="og:description" content="90 lat temu, 24 kwietnia 1927 roku nadawanie rozpoczęła..." />
lead = self.get_node('//meta[@property="og:description"]', attr='content').strip()
return lead.strip() if lead else ''
def get_author(self):
return self.get_node('//meta[@property="article:author"]', attr='content').strip()
def get_language(self):
return 'en'
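# Hedged usage note (not part of the original file): source selection is done
# by is_my_url(), e.g.
#
#   OReillySource.is_my_url('https://www.oreilly.com/ideas/the-evolution-of-devops')  # True
#
# while get_html() stitches the HEADER (title, lead, author) together with the
# cleaned article body.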
|
macbre/mobify
|
mobify/sources/oreilly.py
|
Python
|
mit
| 1,627
| 0.003697
|
# -*- coding: utf-8 -*-
#!/usr/bin/python
#
# This is derived from a cadquery script for generating PDIP models in X3D format
#
# from https://bitbucket.org/hyOzd/freecad-macros
# author hyOzd
# Dimensions are from Microchip's Packaging Specification document
# DS00000049BY. Body drawing is the same as the QFP generator.
## requirements
## cadquery FreeCAD plugin
## https://github.com/jmwright/cadquery-freecad-module
## to run the script just do: freecad main_generator.py modelName
## e.g. c:\freecad\bin\freecad main_generator.py DIP8
## the script will generate STEP and VRML parametric models
## to be used with kicad StepUp script
#* These are a FreeCAD & cadquery tools *
#* to export generated models in STEP & VRML format. *
#* *
#* cadquery script for generating QFP/SOIC/SSOP/TSSOP models in STEP AP214 *
#* Copyright (c) 2015 *
#* Maurice https://launchpad.net/~easyw *
#* All trademarks within this guide belong to their legitimate owners. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., *
#* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA *
#* *
#****************************************************************************
__title__ = "make Valve 3D models"
__author__ = "Stefan, based on Valve script"
__Comment__ = 'make varistor 3D models exported to STEP and VRML for Kicad StepUP script'
___ver___ = "1.3.3 14/08/2015"
# maui import cadquery as cq
# maui from Helpers import show
from collections import namedtuple
import math
import sys, os
import datetime
from datetime import datetime
sys.path.append("../_tools")
import exportPartToVRML as expVRML
import shaderColors
# maui start
import FreeCAD, Draft, FreeCADGui
import ImportGui
import FreeCADGui as Gui
#from Gui.Command import *
outdir=os.path.dirname(os.path.realpath(__file__) + os.sep + '..' + os.sep + '_3Dmodels')
scriptdir=os.path.dirname(os.path.realpath(__file__))
sys.path.append(outdir)
sys.path.append(scriptdir)
if FreeCAD.GuiUp:
from PySide import QtCore, QtGui
# Licence information of the generated models.
#################################################################################################
STR_licAuthor = "kicad StepUp"
STR_licEmail = "ksu"
STR_licOrgSys = "kicad StepUp"
STR_licPreProc = "OCC"
STR_licOrg = "FreeCAD"
#################################################################################################
import cq_belfuse # modules parameters
from cq_belfuse import *
import cq_keystone # modules parameters
from cq_keystone import *
import cq_bulgin # modules parameters
from cq_bulgin import *
import cq_schurter # modules parameters
from cq_schurter import *
import cq_tme # modules parameters
from cq_tme import *
import cq_littlefuse # modules parameters
from cq_littlefuse import *
different_models = [
cq_belfuse(),
cq_keystone(),
cq_bulgin(),
cq_schurter(),
cq_tme(),
cq_littlefuse(),
]
def make_3D_model(models_dir, model_class, modelID):
LIST_license = ["",]
CheckedmodelName = 'A_' + modelID.replace('.', '').replace('-', '_').replace('(', '').replace(')', '')
CheckedmodelName = CheckedmodelName
Newdoc = App.newDocument(CheckedmodelName)
App.setActiveDocument(CheckedmodelName)
Gui.ActiveDocument=Gui.getDocument(CheckedmodelName)
destination_dir = model_class.get_dest_3D_dir(modelID)
material_substitutions = model_class.make_3D_model(modelID)
modelName = model_class.get_model_name(modelID)
doc = FreeCAD.ActiveDocument
doc.Label = CheckedmodelName
objs=GetListOfObjects(FreeCAD, doc)
objs[0].Label = CheckedmodelName
restore_Main_Tools()
script_dir=os.path.dirname(os.path.realpath(__file__))
expVRML.say(models_dir)
out_dir=models_dir+os.sep+destination_dir
if not os.path.exists(out_dir):
os.makedirs(out_dir)
exportSTEP(doc, modelName, out_dir)
if LIST_license[0]=="":
LIST_license=Lic.LIST_int_license
LIST_license.append("")
Lic.addLicenseToStep(out_dir + os.sep, modelName+".step", LIST_license,\
STR_licAuthor, STR_licEmail, STR_licOrgSys, STR_licOrg, STR_licPreProc)
# scale and export Vrml model
scale=1/2.54
#exportVRML(doc,modelName,scale,out_dir)
del objs
objs=GetListOfObjects(FreeCAD, doc)
expVRML.say("######################################################################")
expVRML.say(objs)
expVRML.say("######################################################################")
export_objects, used_color_keys = expVRML.determineColors(Gui, objs, material_substitutions)
export_file_name=out_dir+os.sep+modelName+'.wrl'
colored_meshes = expVRML.getColoredMesh(Gui, export_objects , scale)
#expVRML.writeVRMLFile(colored_meshes, export_file_name, used_color_keys)# , LIST_license
expVRML.writeVRMLFile(colored_meshes, export_file_name, used_color_keys, LIST_license)
#scale=0.3937001
#exportVRML(doc,modelName,scale,out_dir)
# Save the doc in Native FC format
saveFCdoc(App, Gui, doc, modelName,out_dir)
#display BBox
Gui.activateWorkbench("PartWorkbench")
Gui.SendMsgToActiveView("ViewFit")
Gui.activeDocument().activeView().viewAxometric()
#FreeCADGui.ActiveDocument.activeObject.BoundingBox = True
def run():
## # get variant names from command line
return
#import step_license as L
import add_license as Lic
# when run from command line
if __name__ == "__main__" or __name__ == "main_generator":
FreeCAD.Console.PrintMessage('\r\nRunning...\r\n')
full_path=os.path.realpath(__file__)
expVRML.say(full_path)
scriptdir=os.path.dirname(os.path.realpath(__file__))
expVRML.say(scriptdir)
sub_path = full_path.split(scriptdir)
expVRML.say(sub_path)
sub_dir_name =full_path.split(os.sep)[-2]
expVRML.say(sub_dir_name)
sub_path = full_path.split(sub_dir_name)[0]
expVRML.say(sub_path)
models_dir=sub_path+"_3Dmodels"
model_to_build = ''
if len(sys.argv) < 3:
FreeCAD.Console.PrintMessage('No variant name is given, add a valid model name as an argument or the argument "all"\r\n')
sys.exit()
else:
model_to_build=sys.argv[2]
found_one = False
if len(model_to_build) > 0:
if model_to_build == 'all' or model_to_build == 'All' or model_to_build == 'ALL':
found_one = True
for n in different_models:
listall = n.get_list_all()
for i in listall:
make_3D_model(models_dir, n, i)
elif model_to_build == 'list':
found_one = True
FreeCAD.Console.PrintMessage('\r\n')
for n in different_models:
listall = n.get_list_all()
for i in listall:
FreeCAD.Console.PrintMessage(i + '\r\n')
else:
for n in different_models:
if n.model_exist(model_to_build):
found_one = True
make_3D_model(models_dir, n, model_to_build)
if not found_one:
print("Parameters for %s doesn't exist, skipping. " % model_to_build)
|
easyw/kicad-3d-models-in-freecad
|
cadquery/FCAD_script_generator/Fuse/main_generator.py
|
Python
|
gpl-2.0
| 8,654
| 0.012364
|
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf import settings
from geonode import get_version
from geonode.catalogue import default_catalogue_backend
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from geonode.utils import ogc_server_settings
def resource_urls(request):
"""Global values to pass to templates"""
site = Site.objects.get_current()
return dict(
STATIC_URL=settings.STATIC_URL,
GEOSERVER_BASE_URL=ogc_server_settings.public_url,
CATALOGUE_BASE_URL=default_catalogue_backend()['URL'],
REGISTRATION_OPEN=settings.REGISTRATION_OPEN,
VERSION=get_version(),
SITE_NAME=site.name,
SITE_DOMAIN=site.domain,
GROUPS_APP = True if "geonode.contrib.groups" in settings.INSTALLED_APPS else False,
UPLOADER_URL = reverse('data_upload') if getattr(settings, 'UPLOADER', dict()).get('BACKEND', 'geonode.rest') == 'geonode.importer' else reverse('layer_upload'),
GEOGIT_ENABLED = ogc_server_settings.GEOGIT_ENABLED,
TIME_ENABLED = getattr(settings, 'UPLOADER', dict()).get('OPTIONS', dict()).get('TIME_ENABLED', False),
DEBUG_STATIC = getattr(settings, "DEBUG_STATIC", False),
MF_PRINT_ENABLED = ogc_server_settings.MAPFISH_PRINT_ENABLED,
PRINTNG_ENABLED = ogc_server_settings.PRINTNG_ENABLED,
GS_SECURITY_ENABLED = ogc_server_settings.GEONODE_SECURITY_ENABLED,
PROXY_URL = getattr(settings, 'PROXY_URL', '/proxy/?url='),
SOCIAL_BUTTONS = getattr(settings, 'SOCIAL_BUTTONS', True),
USE_DOCUMENTS = 'geonode.documents' in settings.INSTALLED_APPS
)
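# Hedged usage note (not part of the original file): this context processor is
# typically activated by adding its dotted path to the Django settings, e.g.
#
#   TEMPLATE_CONTEXT_PROCESSORS += ('geonode.context_processors.resource_urls',)
#
# so the keys returned above become available to every template rendered with
# a RequestContext.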
|
GISPPU/GrenadaLandInformation
|
geonode/context_processors.py
|
Python
|
gpl-3.0
| 2,437
| 0.010669
|
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fcntl
import re
import os
import select
import sys
import subprocess
from color import Coloring
from command import Command, MirrorSafeCommand
_CAN_COLOR = [
'branch',
'diff',
'grep',
'log',
]
class ForallColoring(Coloring):
def __init__(self, config):
Coloring.__init__(self, config, 'forall')
self.project = self.printer('project', attr='bold')
class Forall(Command, MirrorSafeCommand):
common = False
helpSummary = "Run a shell command in each project"
helpUsage = """
%prog [<project>...] -c <command> [<arg>...]
"""
helpDescription = """
Executes the same shell command in each project.
Output Formatting
-----------------
The -p option causes '%prog' to bind pipes to the command's stdin,
stdout and stderr streams, and pipe all output into a continuous
stream that is displayed in a single pager session. Project headings
are inserted before the output of each command is displayed. If the
command produces no output in a project, no heading is displayed.
The formatting convention used by -p is very suitable for some
types of searching, e.g. `repo forall -p -c git log -SFoo` will
print all commits that add or remove references to Foo.
The -v option causes '%prog' to display stderr messages if a
command produces output only on stderr. Normally the -p option
causes command output to be suppressed until the command produces
at least one byte of output on stdout.
Environment
-----------
pwd is the project's working directory. If the current client is
a mirror client, then pwd is the Git repository.
REPO_PROJECT is set to the unique name of the project.
REPO_PATH is the path relative to the root of the client.
REPO_REMOTE is the name of the remote system from the manifest.
REPO_LREV is the name of the revision from the manifest, translated
to a local tracking branch. If you need to pass the manifest
revision to a locally executed git command, use REPO_LREV.
REPO_RREV is the name of the revision from the manifest, exactly
as written in the manifest.
shell positional arguments ($1, $2, .., $#) are set to any arguments
following <command>.
Unless -p is used, stdin, stdout, stderr are inherited from the
terminal and are not redirected.
"""
def _Options(self, p):
def cmd(option, opt_str, value, parser):
setattr(parser.values, option.dest, list(parser.rargs))
while parser.rargs:
del parser.rargs[0]
p.add_option('-c', '--command',
help='Command (and arguments) to execute',
dest='command',
action='callback',
callback=cmd)
g = p.add_option_group('Output')
g.add_option('-p',
dest='project_header', action='store_true',
help='Show project headers before output')
g.add_option('-v', '--verbose',
dest='verbose', action='store_true',
help='Show command error messages')
def WantPager(self, opt):
return opt.project_header
def Execute(self, opt, args):
if not opt.command:
self.Usage()
cmd = [opt.command[0]]
shell = True
if re.compile(r'^[a-z0-9A-Z_/\.-]+$').match(cmd[0]):
shell = False
if shell:
cmd.append(cmd[0])
cmd.extend(opt.command[1:])
if opt.project_header \
and not shell \
and cmd[0] == 'git':
# If this is a direct git command that can enable colorized
# output and the user prefers coloring, add --color into the
# command line because we are going to wrap the command into
# a pipe and git won't know coloring should activate.
#
for cn in cmd[1:]:
if not cn.startswith('-'):
break
if cn in _CAN_COLOR:
class ColorCmd(Coloring):
def __init__(self, config, cmd):
Coloring.__init__(self, config, cmd)
if ColorCmd(self.manifest.manifestProject.config, cn).is_on:
cmd.insert(cmd.index(cn) + 1, '--color')
mirror = self.manifest.IsMirror
out = ForallColoring(self.manifest.manifestProject.config)
out.redirect(sys.stdout)
rc = 0
first = True
for project in self.GetProjects(args):
env = dict(os.environ.iteritems())
def setenv(name, val):
if val is None:
val = ''
env[name] = val
setenv('REPO_PROJECT', project.name)
setenv('REPO_PATH', project.relpath)
setenv('REPO_REMOTE', project.remote.name)
setenv('REPO_LREV', project.GetRevisionId())
setenv('REPO_RREV', project.revisionExpr)
if mirror:
setenv('GIT_DIR', project.gitdir)
cwd = project.gitdir
else:
cwd = project.worktree
if not os.path.exists(cwd):
if (opt.project_header and opt.verbose) \
or not opt.project_header:
print >>sys.stderr, 'skipping %s/' % project.relpath
continue
if opt.project_header:
stdin = subprocess.PIPE
stdout = subprocess.PIPE
stderr = subprocess.PIPE
else:
stdin = None
stdout = None
stderr = None
p = subprocess.Popen(cmd,
cwd = cwd,
shell = shell,
env = env,
stdin = stdin,
stdout = stdout,
stderr = stderr)
if opt.project_header:
class sfd(object):
def __init__(self, fd, dest):
self.fd = fd
self.dest = dest
def fileno(self):
return self.fd.fileno()
empty = True
didout = False
errbuf = ''
p.stdin.close()
s_in = [sfd(p.stdout, sys.stdout),
sfd(p.stderr, sys.stderr)]
for s in s_in:
flags = fcntl.fcntl(s.fd, fcntl.F_GETFL)
fcntl.fcntl(s.fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
while s_in:
in_ready, out_ready, err_ready = select.select(s_in, [], [])
for s in in_ready:
buf = s.fd.read(4096)
if not buf:
s.fd.close()
s_in.remove(s)
continue
if not opt.verbose:
if s.fd == p.stdout:
didout = True
else:
errbuf += buf
continue
if empty:
if first:
first = False
else:
out.nl()
out.project('project %s/', project.relpath)
out.nl()
out.flush()
if errbuf:
sys.stderr.write(errbuf)
sys.stderr.flush()
errbuf = ''
empty = False
s.dest.write(buf)
s.dest.flush()
r = p.wait()
if r != 0 and r != rc:
rc = r
if rc != 0:
sys.exit(rc)
|
abstrakraft/repo
|
subcmds/forall.py
|
Python
|
apache-2.0
| 7,450
| 0.010604
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mayaviviewerwidget.ui'
#
# Created: Mon Nov 11 18:02:00 2013
# by: pyside-uic 0.2.13 running on PySide 1.1.0
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(914, 548)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Dialog.sizePolicy().hasHeightForWidth())
Dialog.setSizePolicy(sizePolicy)
self.horizontalLayout_2 = QtGui.QHBoxLayout(Dialog)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.widget = QtGui.QWidget(Dialog)
self.widget.setEnabled(True)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
self.widget.setSizePolicy(sizePolicy)
self.widget.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.widget.setObjectName("widget")
self.gridLayout = QtGui.QGridLayout(self.widget)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.widget1 = QtGui.QWidget(self.widget)
self.widget1.setMaximumSize(QtCore.QSize(500, 16777215))
self.widget1.setObjectName("widget1")
self.verticalLayout_3 = QtGui.QVBoxLayout(self.widget1)
self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setSizeConstraint(QtGui.QLayout.SetDefaultConstraint)
self.verticalLayout.setObjectName("verticalLayout")
self.tableWidget = QtGui.QTableWidget(self.widget1)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tableWidget.sizePolicy().hasHeightForWidth())
self.tableWidget.setSizePolicy(sizePolicy)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(2)
self.tableWidget.setRowCount(0)
item = QtGui.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(1, item)
self.tableWidget.horizontalHeader().setVisible(True)
self.tableWidget.horizontalHeader().setCascadingSectionResizes(False)
self.tableWidget.horizontalHeader().setDefaultSectionSize(100)
self.verticalLayout.addWidget(self.tableWidget)
self.sliceplanegroup = QtGui.QGroupBox(self.widget1)
self.sliceplanegroup.setEnabled(False)
self.sliceplanegroup.setObjectName("sliceplanegroup")
self.horizontalLayout = QtGui.QHBoxLayout(self.sliceplanegroup)
self.horizontalLayout.setObjectName("horizontalLayout")
self.slicePlaneRadioX = QtGui.QRadioButton(self.sliceplanegroup)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.slicePlaneRadioX.sizePolicy().hasHeightForWidth())
self.slicePlaneRadioX.setSizePolicy(sizePolicy)
self.slicePlaneRadioX.setChecked(False)
self.slicePlaneRadioX.setObjectName("slicePlaneRadioX")
self.horizontalLayout.addWidget(self.slicePlaneRadioX)
self.slicePlaneRadioY = QtGui.QRadioButton(self.sliceplanegroup)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.slicePlaneRadioY.sizePolicy().hasHeightForWidth())
self.slicePlaneRadioY.setSizePolicy(sizePolicy)
self.slicePlaneRadioY.setChecked(True)
self.slicePlaneRadioY.setObjectName("slicePlaneRadioY")
self.horizontalLayout.addWidget(self.slicePlaneRadioY)
self.slicePlaneRadioZ = QtGui.QRadioButton(self.sliceplanegroup)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.slicePlaneRadioZ.sizePolicy().hasHeightForWidth())
self.slicePlaneRadioZ.setSizePolicy(sizePolicy)
self.slicePlaneRadioZ.setObjectName("slicePlaneRadioZ")
self.horizontalLayout.addWidget(self.slicePlaneRadioZ)
self.verticalLayout.addWidget(self.sliceplanegroup)
self.screenshotgroup = QtGui.QGroupBox(self.widget1)
self.screenshotgroup.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.screenshotgroup.setObjectName("screenshotgroup")
self.formLayout = QtGui.QFormLayout(self.screenshotgroup)
self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout.setObjectName("formLayout")
self.pixelsXLabel = QtGui.QLabel(self.screenshotgroup)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pixelsXLabel.sizePolicy().hasHeightForWidth())
self.pixelsXLabel.setSizePolicy(sizePolicy)
self.pixelsXLabel.setObjectName("pixelsXLabel")
self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.pixelsXLabel)
self.screenshotPixelXLineEdit = QtGui.QLineEdit(self.screenshotgroup)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.screenshotPixelXLineEdit.sizePolicy().hasHeightForWidth())
self.screenshotPixelXLineEdit.setSizePolicy(sizePolicy)
self.screenshotPixelXLineEdit.setObjectName("screenshotPixelXLineEdit")
self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.screenshotPixelXLineEdit)
self.pixelsYLabel = QtGui.QLabel(self.screenshotgroup)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pixelsYLabel.sizePolicy().hasHeightForWidth())
self.pixelsYLabel.setSizePolicy(sizePolicy)
self.pixelsYLabel.setObjectName("pixelsYLabel")
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.pixelsYLabel)
self.screenshotPixelYLineEdit = QtGui.QLineEdit(self.screenshotgroup)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.screenshotPixelYLineEdit.sizePolicy().hasHeightForWidth())
self.screenshotPixelYLineEdit.setSizePolicy(sizePolicy)
self.screenshotPixelYLineEdit.setObjectName("screenshotPixelYLineEdit")
self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.screenshotPixelYLineEdit)
self.screenshotFilenameLabel = QtGui.QLabel(self.screenshotgroup)
self.screenshotFilenameLabel.setObjectName("screenshotFilenameLabel")
self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.screenshotFilenameLabel)
self.screenshotFilenameLineEdit = QtGui.QLineEdit(self.screenshotgroup)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.screenshotFilenameLineEdit.sizePolicy().hasHeightForWidth())
self.screenshotFilenameLineEdit.setSizePolicy(sizePolicy)
self.screenshotFilenameLineEdit.setObjectName("screenshotFilenameLineEdit")
self.formLayout.setWidget(2, QtGui.QFormLayout.FieldRole, self.screenshotFilenameLineEdit)
self.screenshotSaveButton = QtGui.QPushButton(self.screenshotgroup)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.screenshotSaveButton.sizePolicy().hasHeightForWidth())
self.screenshotSaveButton.setSizePolicy(sizePolicy)
self.screenshotSaveButton.setObjectName("screenshotSaveButton")
self.formLayout.setWidget(3, QtGui.QFormLayout.FieldRole, self.screenshotSaveButton)
self.verticalLayout.addWidget(self.screenshotgroup)
self.closeButton = QtGui.QPushButton(self.widget1)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.closeButton.sizePolicy().hasHeightForWidth())
self.closeButton.setSizePolicy(sizePolicy)
self.closeButton.setLayoutDirection(QtCore.Qt.LeftToRight)
self.closeButton.setObjectName("closeButton")
self.verticalLayout.addWidget(self.closeButton)
self.verticalLayout_3.addLayout(self.verticalLayout)
self.gridLayout.addWidget(self.widget1, 0, 0, 1, 1)
self.MayaviScene = MayaviSceneWidget(self.widget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.MayaviScene.sizePolicy().hasHeightForWidth())
self.MayaviScene.setSizePolicy(sizePolicy)
self.MayaviScene.setObjectName("MayaviScene")
self.gridLayout.addWidget(self.MayaviScene, 0, 1, 1, 1)
self.horizontalLayout_2.addWidget(self.widget)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "Model Viewer", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.setSortingEnabled(False)
self.tableWidget.horizontalHeaderItem(0).setText(QtGui.QApplication.translate("Dialog", "Visible", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.horizontalHeaderItem(1).setText(QtGui.QApplication.translate("Dialog", "Type", None, QtGui.QApplication.UnicodeUTF8))
self.sliceplanegroup.setTitle(QtGui.QApplication.translate("Dialog", "Image Slice Plane", None, QtGui.QApplication.UnicodeUTF8))
self.slicePlaneRadioX.setText(QtGui.QApplication.translate("Dialog", "X", None, QtGui.QApplication.UnicodeUTF8))
self.slicePlaneRadioY.setText(QtGui.QApplication.translate("Dialog", "Y", None, QtGui.QApplication.UnicodeUTF8))
self.slicePlaneRadioZ.setText(QtGui.QApplication.translate("Dialog", "Z", None, QtGui.QApplication.UnicodeUTF8))
self.screenshotgroup.setTitle(QtGui.QApplication.translate("Dialog", "Screenshot", None, QtGui.QApplication.UnicodeUTF8))
self.pixelsXLabel.setText(QtGui.QApplication.translate("Dialog", "Pixels X:", None, QtGui.QApplication.UnicodeUTF8))
self.screenshotPixelXLineEdit.setText(QtGui.QApplication.translate("Dialog", "800", None, QtGui.QApplication.UnicodeUTF8))
self.pixelsYLabel.setText(QtGui.QApplication.translate("Dialog", "Pixels Y:", None, QtGui.QApplication.UnicodeUTF8))
self.screenshotPixelYLineEdit.setText(QtGui.QApplication.translate("Dialog", "600", None, QtGui.QApplication.UnicodeUTF8))
self.screenshotFilenameLabel.setText(QtGui.QApplication.translate("Dialog", "Filename:", None, QtGui.QApplication.UnicodeUTF8))
self.screenshotFilenameLineEdit.setText(QtGui.QApplication.translate("Dialog", "screenshot.png", None, QtGui.QApplication.UnicodeUTF8))
self.screenshotSaveButton.setText(QtGui.QApplication.translate("Dialog", "Save Screenshot", None, QtGui.QApplication.UnicodeUTF8))
self.closeButton.setText(QtGui.QApplication.translate("Dialog", "Close", None, QtGui.QApplication.UnicodeUTF8))
from mayaviscenewidget import MayaviSceneWidget
|
MusculoskeletalAtlasProject/mapclient-tests
|
test_resources/updater_test/mayaviviewerstep-master/mapclientplugins/mayaviviewerstep/widgets/ui_mayaviviewerwidget.py
|
Python
|
apache-2.0
| 12,774
| 0.004619
|
# -*- coding: utf-8 -*-
# Akvo RSR is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
from lxml import etree
def participating_org(project):
"""
    Generate the participating-org elements.
:param project: Project object
:return: A list of Etree elements
"""
partnership_elements = []
from akvo.rsr.models import Partnership
for partnership in project.partnerships.all():
# Don't include reporting orgs or sponsor partners
if partnership.iati_organisation_role in Partnership.IATI_ROLE_LIST[:4] and \
partnership.organisation:
org = partnership.organisation.get_original()
element = etree.Element("participating-org")
if org.iati_org_id:
element.attrib['ref'] = org.iati_org_id
if org.new_organisation_type:
element.attrib['type'] = str(org.new_organisation_type)
if partnership.iati_organisation_role:
element.attrib['role'] = str(partnership.iati_organisation_role)
if partnership.iati_activity_id:
element.attrib['activity-id'] = partnership.iati_activity_id
# TODO: Funding amount
narrative_element = etree.SubElement(element, "narrative")
if org.long_name:
narrative_element.text = org.long_name
elif org.name:
narrative_element.text = org.name
partnership_elements.append(element)
return partnership_elements
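# Illustrative output sketch (not part of the original module): for a partnership
# whose organisation has an IATI id, the element built above serialises roughly as
#
#   <participating-org ref="NL-KVK-12345" type="22" role="4" activity-id="XX-1">
#     <narrative>Example Organisation Long Name</narrative>
#   </participating-org>
#
# The attribute values here are hypothetical; only the attributes actually set in
# the loop (ref, type, role, activity-id) and the narrative child are emitted.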
|
akvo/akvo-rsr
|
akvo/iati/exports/elements/participating_org.py
|
Python
|
agpl-3.0
| 1,717
| 0.00233
|
#!/usr/bin/env python
import json
from tdclient import api, models
class Client:
"""API Client for Treasure Data Service
"""
def __init__(self, *args, **kwargs):
self._api = api.API(*args, **kwargs)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
@property
def api(self):
"""
an instance of :class:`tdclient.api.API`
"""
return self._api
@property
def apikey(self):
"""
API key string.
"""
return self._api.apikey
def server_status(self):
"""
Returns:
a string represents current server status.
"""
return self.api.server_status()
def create_database(self, db_name, **kwargs):
"""
Args:
db_name (str): name of a database to create
Returns:
`True` if success
"""
return self.api.create_database(db_name, **kwargs)
def delete_database(self, db_name):
"""
Args:
db_name (str): name of database to delete
Returns:
`True` if success
"""
return self.api.delete_database(db_name)
def databases(self):
"""
Returns:
a list of :class:`tdclient.models.Database`
"""
databases = self.api.list_databases()
return [
models.Database(self, db_name, **kwargs)
for (db_name, kwargs) in databases.items()
]
def database(self, db_name):
"""
Args:
db_name (str): name of a database
Returns:
:class:`tdclient.models.Database`
"""
databases = self.api.list_databases()
for (name, kwargs) in databases.items():
if name == db_name:
return models.Database(self, name, **kwargs)
raise api.NotFoundError("Database '%s' does not exist" % (db_name))
def create_log_table(self, db_name, table_name):
"""
Args:
db_name (str): name of a database
table_name (str): name of a table to create
Returns:
`True` if success
"""
return self.api.create_log_table(db_name, table_name)
def swap_table(self, db_name, table_name1, table_name2):
"""
Args:
db_name (str): name of a database
table_name1 (str): original table name
table_name2 (str): table name you want to rename to
Returns:
`True` if success
"""
return self.api.swap_table(db_name, table_name1, table_name2)
def update_schema(self, db_name, table_name, schema):
"""Updates the schema of a table
Args:
db_name (str): name of a database
table_name (str): name of a table
            schema (list): a list of lists describing the schema definition (will
                be converted to JSON)
e.g.
.. code-block:: python
[
["member_id", # column name
"string", # data type
"mem_id", # alias of the column name
],
["row_index", "long", "row_ind"],
...
]
Returns:
`True` if success
"""
return self.api.update_schema(db_name, table_name, json.dumps(schema))
def update_expire(self, db_name, table_name, expire_days):
"""Set expiration date to a table
Args:
db_name (str): name of a database
table_name (str): name of a table
            expire_days (int): expiration date in days from today
Returns:
`True` if success
"""
return self.api.update_expire(db_name, table_name, expire_days)
def delete_table(self, db_name, table_name):
"""Delete a table
Args:
db_name (str): name of a database
table_name (str): name of a table
Returns:
a string represents the type of deleted table
"""
return self.api.delete_table(db_name, table_name)
def tables(self, db_name):
"""List existing tables
Args:
db_name (str): name of a database
Returns:
a list of :class:`tdclient.models.Table`
"""
m = self.api.list_tables(db_name)
return [
models.Table(self, db_name, table_name, **kwargs)
for (table_name, kwargs) in m.items()
]
def table(self, db_name, table_name):
"""
Args:
db_name (str): name of a database
table_name (str): name of a table
Returns:
:class:`tdclient.models.Table`
Raises:
tdclient.api.NotFoundError: if the table doesn't exist
"""
tables = self.tables(db_name)
for table in tables:
if table.table_name == table_name:
return table
raise api.NotFoundError("Table '%s.%s' does not exist" % (db_name, table_name))
def tail(self, db_name, table_name, count, to=None, _from=None, block=None):
"""Get the contents of the table in reverse order based on the registered time
(last data first).
Args:
db_name (str): Target database name.
table_name (str): Target table name.
count (int): Number for record to show up from the end.
to: Deprecated parameter.
_from: Deprecated parameter.
block: Deprecated parameter.
Returns:
[dict]: Contents of the table.
"""
return self.api.tail(db_name, table_name, count, to, _from, block)
def change_database(self, db_name, table_name, new_db_name):
"""Move a target table from it's original database to new destination database.
Args:
db_name (str): Target database name.
table_name (str): Target table name.
new_db_name (str): Destination database name to be moved.
Returns:
bool: `True` if succeeded.
"""
return self.api.change_database(db_name, table_name, new_db_name)
def query(
self,
db_name,
q,
result_url=None,
priority=None,
retry_limit=None,
type="hive",
**kwargs
):
"""Run a query on specified database table.
Args:
db_name (str): name of a database
q (str): a query string
result_url (str): result output URL. e.g.,
``postgresql://<username>:<password>@<hostname>:<port>/<database>/<table>``
priority (int or str): priority (e.g. "NORMAL", "HIGH", etc.)
retry_limit (int): retry limit
type (str): name of a query engine
Returns:
:class:`tdclient.models.Job`
Raises:
ValueError: if unknown query type has been specified
"""
# for compatibility, assume type is hive unless specifically specified
if type not in ["hive", "pig", "impala", "presto"]:
raise ValueError("The specified query type is not supported: %s" % (type))
job_id = self.api.query(
q,
type=type,
db=db_name,
result_url=result_url,
priority=priority,
retry_limit=retry_limit,
**kwargs
)
return models.Job(self, job_id, type, q)
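    # Usage sketch (illustrative, not part of the original file): running a
    # Presto query and reading its result might look roughly like this; the
    # API key, database and query text are placeholders, and the Job helper
    # calls are assumptions based on tdclient.models.Job.
    #
    #   client = Client(apikey="YOUR_API_KEY")
    #   job = client.query("sample_datasets",
    #                      "SELECT COUNT(1) FROM www_access",
    #                      type="presto")
    #   job.wait()
    #   for row in job.result():
    #       print(row)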
def jobs(self, _from=None, to=None, status=None, conditions=None):
"""List jobs
Args:
_from (int, optional): Gets the Job from the nth index in the list. Default: 0.
to (int, optional): Gets the Job up to the nth index in the list.
By default, the first 20 jobs in the list are displayed
status (str, optional): Filter by given status. {"queued", "running", "success", "error"}
conditions (str, optional): Condition for ``TIMESTAMPDIFF()`` to search for slow queries.
Avoid using this parameter as it can be dangerous.
Returns:
a list of :class:`tdclient.models.Job`
"""
results = self.api.list_jobs(_from, to, status, conditions)
return [job_from_dict(self, d) for d in results]
def job(self, job_id):
"""Get a job from `job_id`
Args:
job_id (str): job id
Returns:
:class:`tdclient.models.Job`
"""
d = self.api.show_job(str(job_id))
return job_from_dict(self, d, job_id=job_id)
def job_status(self, job_id):
"""
Args:
job_id (str): job id
Returns:
a string represents the status of the job ("success", "error", "killed", "queued", "running")
"""
return self.api.job_status(job_id)
def job_result(self, job_id):
"""
Args:
job_id (str): job id
Returns:
a list of each rows in result set
"""
return self.api.job_result(job_id)
def job_result_each(self, job_id):
"""
Args:
job_id (str): job id
Returns:
an iterator of result set
"""
for row in self.api.job_result_each(job_id):
yield row
def job_result_format(self, job_id, format):
"""
Args:
job_id (str): job id
format (str): output format of result set
Returns:
a list of each rows in result set
"""
return self.api.job_result_format(job_id, format)
def job_result_format_each(self, job_id, format):
"""
Args:
job_id (str): job id
format (str): output format of result set
Returns:
an iterator of rows in result set
"""
for row in self.api.job_result_format_each(job_id, format):
yield row
def kill(self, job_id):
"""
Args:
job_id (str): job id
Returns:
a string represents the status of killed job ("queued", "running")
"""
return self.api.kill(job_id)
def export_data(self, db_name, table_name, storage_type, params=None):
"""Export data from Treasure Data Service
Args:
db_name (str): name of a database
table_name (str): name of a table
storage_type (str): type of the storage
params (dict): optional parameters. Assuming the following keys:
- access_key_id (str):
ID to access the information to be exported.
- secret_access_key (str):
Password for the `access_key_id`.
- file_prefix (str, optional):
Filename of exported file.
Default: "<database_name>/<table_name>"
- file_format (str, optional):
File format of the information to be
exported. {"jsonl.gz", "tsv.gz", "json.gz"}
- from (int, optional):
From Time of the data to be exported in Unix epoch format.
- to (int, optional):
End Time of the data to be exported in Unix epoch format.
- assume_role (str, optional): Assume role.
- bucket (str):
Name of bucket to be used.
- domain_key (str, optional):
Job domain key.
- pool_name (str, optional):
For Presto only. Pool name to be used, if not
specified, default pool would be used.
Returns:
:class:`tdclient.models.Job`
"""
params = {} if params is None else params
job_id = self.api.export_data(db_name, table_name, storage_type, params)
return models.Job(self, job_id, "export", None)
def partial_delete(self, db_name, table_name, to, _from, params=None):
"""Create a job to partially delete the contents of the table with the given
time range.
Args:
db_name (str): Target database name.
table_name (str): Target table name.
to (int): Time in Unix Epoch format indicating the End date and time of the
data to be deleted. Should be set only by the hour. Minutes and seconds
values will not be accepted.
_from (int): Time in Unix Epoch format indicating the Start date and time of
the data to be deleted. Should be set only by the hour. Minutes and
seconds values will not be accepted.
params (dict, optional): Extra parameters.
- pool_name (str, optional):
Indicates the resource pool to execute this
job. If not provided, the account's default resource pool would be
used.
- domain_key (str, optional):
Domain key that will be assigned to the
partial delete job to be created
Returns:
:class:`tdclient.models.Job`
"""
params = {} if params is None else params
job_id = self.api.partial_delete(db_name, table_name, to, _from, params)
return models.Job(self, job_id, "partialdelete", None)
def create_bulk_import(self, name, database, table, params=None):
"""Create new bulk import session
Args:
name (str): name of new bulk import session
database (str): name of a database
table (str): name of a table
Returns:
:class:`tdclient.models.BulkImport`
"""
params = {} if params is None else params
self.api.create_bulk_import(name, database, table, params)
return models.BulkImport(self, name=name, database=database, table=table)
def delete_bulk_import(self, name):
"""Delete a bulk import session
Args:
name (str): name of a bulk import session
Returns:
`True` if success
"""
return self.api.delete_bulk_import(name)
def freeze_bulk_import(self, name):
"""Freeze a bulk import session
Args:
name (str): name of a bulk import session
Returns:
`True` if success
"""
return self.api.freeze_bulk_import(name)
def unfreeze_bulk_import(self, name):
"""Unfreeze a bulk import session
Args:
name (str): name of a bulk import session
Returns:
`True` if success
"""
return self.api.unfreeze_bulk_import(name)
def perform_bulk_import(self, name):
"""Perform a bulk import session
Args:
name (str): name of a bulk import session
Returns:
:class:`tdclient.models.Job`
"""
job_id = self.api.perform_bulk_import(name)
return models.Job(self, job_id, "bulk_import", None)
def commit_bulk_import(self, name):
"""Commit a bulk import session
Args:
name (str): name of a bulk import session
Returns:
`True` if success
"""
return self.api.commit_bulk_import(name)
def bulk_import_error_records(self, name):
"""
Args:
name (str): name of a bulk import session
Returns:
an iterator of error records
"""
for record in self.api.bulk_import_error_records(name):
yield record
def bulk_import(self, name):
"""Get a bulk import session
Args:
name (str): name of a bulk import session
Returns:
:class:`tdclient.models.BulkImport`
"""
data = self.api.show_bulk_import(name)
return models.BulkImport(self, **data)
def bulk_imports(self):
"""List bulk import sessions
Returns:
a list of :class:`tdclient.models.BulkImport`
"""
return [
models.BulkImport(self, **data) for data in self.api.list_bulk_imports()
]
def bulk_import_upload_part(self, name, part_name, bytes_or_stream, size):
"""Upload a part to a bulk import session
Args:
name (str): name of a bulk import session
part_name (str): name of a part of the bulk import session
bytes_or_stream (file-like): a file-like object contains the part
size (int): the size of the part
"""
return self.api.bulk_import_upload_part(name, part_name, bytes_or_stream, size)
def bulk_import_upload_file(self, name, part_name, format, file, **kwargs):
"""Upload a part to Bulk Import session, from an existing file on filesystem.
Args:
name (str): name of a bulk import session
part_name (str): name of a part of the bulk import session
format (str): format of data type (e.g. "msgpack", "json", "csv", "tsv")
file (str or file-like): the name of a file, or a file-like object,
containing the data
**kwargs: extra arguments.
There is more documentation on `format`, `file` and `**kwargs` at
`file import parameters`_.
In particular, for "csv" and "tsv" data, you can change how data columns
are parsed using the ``dtypes`` and ``converters`` arguments.
* ``dtypes`` is a dictionary used to specify a datatype for individual
columns, for instance ``{"col1": "int"}``. The available datatypes
are ``"bool"``, ``"float"``, ``"int"``, ``"str"`` and ``"guess"``.
If a column is also mentioned in ``converters``, then the function
will be used, NOT the datatype.
* ``converters`` is a dictionary used to specify a function that will
be used to parse individual columns, for instance ``{"col1", int}``.
The default behaviour is ``"guess"``, which makes a best-effort to decide
the column datatype. See `file import parameters`_ for more details.
.. _`file import parameters`:
https://tdclient.readthedocs.io/en/latest/file_import_parameters.html
"""
return self.api.bulk_import_upload_file(name, part_name, format, file, **kwargs)
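    # Usage sketch (illustrative, not part of the original file): uploading a CSV
    # part while forcing how two columns are parsed; the session, part and file
    # names below are placeholders.
    #
    #   client.bulk_import_upload_file(
    #       "session_1", "part_1", "csv", "/tmp/data.csv",
    #       dtypes={"user_id": "int"},
    #       converters={"price": float},
    #   )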
def bulk_import_delete_part(self, name, part_name):
"""Delete a part from a bulk import session
Args:
name (str): name of a bulk import session
part_name (str): name of a part of the bulk import session
Returns:
`True` if success
"""
return self.api.bulk_import_delete_part(name, part_name)
def list_bulk_import_parts(self, name):
"""List parts of a bulk import session
Args:
name (str): name of a bulk import session
Returns:
a list of string represents the name of parts
"""
return self.api.list_bulk_import_parts(name)
def create_schedule(self, name, params=None):
"""Create a new scheduled query with the specified name.
Args:
name (str): Scheduled query name.
params (dict, optional): Extra parameters.
- type (str):
Query type. {"presto", "hive"}. Default: "hive"
- database (str):
Target database name.
- timezone (str):
Scheduled query's timezone. e.g. "UTC"
For details, see also: https://gist.github.com/frsyuki/4533752
- cron (str, optional):
Schedule of the query.
{``"@daily"``, ``"@hourly"``, ``"10 * * * *"`` (custom cron)}
See also: https://tddocs.atlassian.net/wiki/spaces/PD/pages/1084633/Scheduling+Jobs+Using+TD+Console
- delay (int, optional):
A delay ensures all buffered events are imported
before running the query. Default: 0
- query (str):
Is a language used to retrieve, insert, update and modify
data. See also: https://tddocs.atlassian.net/wiki/spaces/PD/pages/1084438/SQL+Examples+of+Scheduled+Queries
- priority (int, optional):
Priority of the query.
Range is from -2 (very low) to 2 (very high). Default: 0
- retry_limit (int, optional):
Automatic retry count. Default: 0
- engine_version (str, optional):
Engine version to be used. If none is
specified, the account's default engine version would be set.
{"stable", "experimental"}
- pool_name (str, optional):
For Presto only. Pool name to be used, if not
specified, default pool would be used.
- result (str, optional):
Location where to store the result of the query.
e.g. 'tableau://user:password@host.com:1234/datasource'
Returns:
:class:`datetime.datetime`: Start date time.
"""
if "cron" not in params:
raise ValueError("'cron' option is required")
if "query" not in params:
raise ValueError("'query' option is required")
params = {} if params is None else params
return self.api.create_schedule(name, params)
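    # Usage sketch (illustrative, not part of the original file): creating a daily
    # Presto schedule; the schedule name, database and query are placeholders, and
    # "cron" and "query" are required as enforced above.
    #
    #   client.create_schedule("daily_report", params={
    #       "type": "presto",
    #       "database": "sample_datasets",
    #       "cron": "@daily",
    #       "query": "SELECT COUNT(1) FROM www_access",
    #       "timezone": "UTC",
    #   })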
def delete_schedule(self, name):
"""Delete the scheduled query with the specified name.
Args:
name (str): Target scheduled query name.
Returns:
(str, str): Tuple of cron and query.
"""
return self.api.delete_schedule(name)
def schedules(self):
"""Get the list of all the scheduled queries.
Returns:
[:class:`tdclient.models.Schedule`]
"""
result = self.api.list_schedules()
return [models.Schedule(self, **m) for m in result]
def update_schedule(self, name, params=None):
"""Update the scheduled query.
Args:
name (str): Target scheduled query name.
params (dict): Extra parameters.
- type (str):
Query type. {"presto", "hive"}. Default: "hive"
- database (str):
Target database name.
- timezone (str):
Scheduled query's timezone. e.g. "UTC"
For details, see also: https://gist.github.com/frsyuki/4533752
- cron (str, optional):
Schedule of the query.
{``"@daily"``, ``"@hourly"``, ``"10 * * * *"`` (custom cron)}
See also: https://tddocs.atlassian.net/wiki/spaces/PD/pages/1084633/Scheduling+Jobs+Using+TD+Console
- delay (int, optional):
A delay ensures all buffered events are imported
before running the query. Default: 0
- query (str):
Is a language used to retrieve, insert, update and modify
data. See also: https://tddocs.atlassian.net/wiki/spaces/PD/pages/1084438/SQL+Examples+of+Scheduled+Queries
- priority (int, optional):
Priority of the query.
Range is from -2 (very low) to 2 (very high). Default: 0
- retry_limit (int, optional):
Automatic retry count. Default: 0
- engine_version (str, optional):
Engine version to be used. If none is
specified, the account's default engine version would be set.
{"stable", "experimental"}
- pool_name (str, optional):
For Presto only. Pool name to be used, if not
specified, default pool would be used.
- result (str, optional):
Location where to store the result of the query.
e.g. 'tableau://user:password@host.com:1234/datasource'
"""
params = {} if params is None else params
self.api.update_schedule(name, params)
def history(self, name, _from=None, to=None):
"""Get the history details of the saved query for the past 90days.
Args:
name (str): Target name of the scheduled query.
_from (int, optional): Indicates from which nth record in the run history
would be fetched.
Default: 0.
Note: Count starts from zero. This means that the first record in the
list has a count of zero.
to (int, optional): Indicates up to which nth record in the run history
would be fetched.
Default: 20
Returns:
[:class:`tdclient.models.ScheduledJob`]
"""
result = self.api.history(name, _from, to)
def scheduled_job(m):
(
scheduled_at,
job_id,
type,
status,
query,
start_at,
end_at,
result_url,
priority,
database,
) = m
job_param = {
"url": None,
"debug": None,
"start_at": start_at,
"end_at": end_at,
"cpu_time": None,
"result_size": None,
"result": None,
"result_url": result_url,
"hive_result_schema": None,
"priority": priority,
"retry_limit": None,
"org_name": None,
"database": database,
}
return models.ScheduledJob(
self, scheduled_at, job_id, type, query, **job_param
)
return [scheduled_job(m) for m in result]
def run_schedule(self, name, time, num):
"""Execute the specified query.
Args:
name (str): Target scheduled query name.
time (int): Time in Unix epoch format that would be set as TD_SCHEDULED_TIME
num (int): Indicates how many times the query will be executed.
Value should be 9 or less.
Returns:
[:class:`tdclient.models.ScheduledJob`]
"""
results = self.api.run_schedule(name, time, num)
def scheduled_job(m):
job_id, type, scheduled_at = m
return models.ScheduledJob(self, scheduled_at, job_id, type, None)
return [scheduled_job(m) for m in results]
def import_data(
self, db_name, table_name, format, bytes_or_stream, size, unique_id=None
):
"""Import data into Treasure Data Service
Args:
db_name (str): name of a database
table_name (str): name of a table
format (str): format of data type (e.g. "msgpack.gz")
bytes_or_stream (str or file-like): a byte string or a file-like object contains the data
size (int): the length of the data
unique_id (str): a unique identifier of the data
Returns:
            the elapsed time taken to import the data, in seconds (float)
"""
return self.api.import_data(
db_name, table_name, format, bytes_or_stream, size, unique_id=unique_id
)
def import_file(self, db_name, table_name, format, file, unique_id=None):
"""Import data into Treasure Data Service, from an existing file on filesystem.
This method will decompress/deserialize records from given file, and then
convert it into format acceptable from Treasure Data Service ("msgpack.gz").
Args:
db_name (str): name of a database
table_name (str): name of a table
format (str): format of data type (e.g. "msgpack", "json")
file (str or file-like): a name of a file, or a file-like object contains the data
unique_id (str): a unique identifier of the data
Returns:
float represents the elapsed time to import data
"""
return self.api.import_file(
db_name, table_name, format, file, unique_id=unique_id
)
def results(self):
"""Get the list of all the available authentications.
Returns:
a list of :class:`tdclient.models.Result`
"""
results = self.api.list_result()
def result(m):
name, url, organizations = m
return models.Result(self, name, url, organizations)
return [result(m) for m in results]
def create_result(self, name, url, params=None):
"""Create a new authentication with the specified name.
Args:
name (str): Authentication name.
url (str): Url of the authentication to be created. e.g. "ftp://test.com/"
params (dict, optional): Extra parameters.
Returns:
bool: True if succeeded.
"""
params = {} if params is None else params
return self.api.create_result(name, url, params)
def delete_result(self, name):
"""Delete the authentication having the specified name.
Args:
name (str): Authentication name.
Returns:
bool: True if succeeded.
"""
return self.api.delete_result(name)
def users(self):
"""List users
Returns:
a list of :class:`tdclient.models.User`
"""
results = self.api.list_users()
def user(m):
name, org, roles, email = m
return models.User(self, name, org, roles, email)
return [user(m) for m in results]
def add_user(self, name, org, email, password):
"""Add a new user
Args:
name (str): name of the user
org (str): organization
            email (str): e-mail address
password (str): password
Returns:
`True` if success
"""
return self.api.add_user(name, org, email, password)
def remove_user(self, name):
"""Remove a user
Args:
name (str): name of the user
Returns:
`True` if success
"""
return self.api.remove_user(name)
def list_apikeys(self, name):
"""
Args:
name (str): name of the user
Returns:
a list of string of API key
"""
return self.api.list_apikeys(name)
def add_apikey(self, name):
"""
Args:
name (str): name of the user
Returns:
`True` if success
"""
return self.api.add_apikey(name)
def remove_apikey(self, name, apikey):
"""
Args:
name (str): name of the user
apikey (str): an API key to remove
Returns:
`True` if success
"""
return self.api.remove_apikey(name, apikey)
def close(self):
"""Close opened API connections.
"""
return self._api.close()
def job_from_dict(client, dd, **values):
d = dict()
d.update(dd)
d.update(values)
return models.Job(
client,
d["job_id"],
d["type"],
d["query"],
status=d.get("status"),
url=d.get("url"),
debug=d.get("debug"),
start_at=d.get("start_at"),
end_at=d.get("end_at"),
created_at=d.get("created_at"),
updated_at=d.get("updated_at"),
cpu_time=d.get("cpu_time"),
result_size=d.get("result_size"),
result=d.get("result"),
result_url=d.get("result_url"),
hive_result_schema=d.get("hive_result_schema"),
priority=d.get("priority"),
retry_limit=d.get("retry_limit"),
org_name=d.get("org_name"),
database=d.get("database"),
num_records=d.get("num_records"),
user_name=d.get("user_name"),
linked_result_export_job_id=d.get("linked_result_export_job_id"),
result_export_target_job_id=d.get("result_export_target_job_id"),
)
|
treasure-data/td-client-python
|
tdclient/client.py
|
Python
|
apache-2.0
| 32,550
| 0.001505
|
"""
API views for badges
"""
from edx_rest_framework_extensions.auth.session.authentication import SessionAuthenticationAllowInactiveUser
from opaque_keys import InvalidKeyError
from opaque_keys.edx.django.models import CourseKeyField
from opaque_keys.edx.keys import CourseKey
from rest_framework import generics
from rest_framework.exceptions import APIException
from badges.models import BadgeAssertion
from openedx.core.djangoapps.user_api.permissions import is_field_shared_factory
from openedx.core.lib.api.authentication import OAuth2AuthenticationAllowInactiveUser
from .serializers import BadgeAssertionSerializer
class InvalidCourseKeyError(APIException):
"""
    Raised when the given course key isn't valid.
"""
status_code = 400
default_detail = "The course key provided was invalid."
class UserBadgeAssertions(generics.ListAPIView):
"""
** Use cases **
Request a list of assertions for a user, optionally constrained to a course.
** Example Requests **
GET /api/badges/v1/assertions/user/{username}/
** Response Values **
Body comprised of a list of objects with the following fields:
* badge_class: The badge class the assertion was awarded for. Represented as an object
with the following fields:
* slug: The identifier for the badge class
* issuing_component: The software component responsible for issuing this badge.
* display_name: The display name of the badge.
* course_id: The course key of the course this badge is scoped to, or null if it isn't scoped to a course.
* description: A description of the award and its significance.
* criteria: A description of what is needed to obtain this award.
* image_url: A URL to the icon image used to represent this award.
* image_url: The baked assertion image derived from the badge_class icon-- contains metadata about the award
in its headers.
* assertion_url: The URL to the OpenBadges BadgeAssertion object, for verification by compatible tools
and software.
** Params **
* slug (optional): The identifier for a particular badge class to filter by.
* issuing_component (optional): The issuing component for a particular badge class to filter by
(requires slug to have been specified, or this will be ignored.) If slug is provided and this is not,
assumes the issuing_component should be empty.
* course_id (optional): Returns assertions that were awarded as part of a particular course. If slug is
provided, and this field is not specified, assumes that the target badge has an empty course_id field.
'*' may be used to get all badges with the specified slug, issuing_component combination across all courses.
** Returns **
* 200 on success, with a list of Badge Assertion objects.
* 403 if a user who does not have permission to masquerade as
another user specifies a username other than their own.
* 404 if the specified user does not exist
{
"count": 7,
"previous": null,
"num_pages": 1,
"results": [
{
"badge_class": {
"slug": "special_award",
"issuing_component": "openedx__course",
"display_name": "Very Special Award",
"course_id": "course-v1:edX+DemoX+Demo_Course",
"description": "Awarded for people who did something incredibly special",
"criteria": "Do something incredibly special.",
"image": "http://example.com/media/badge_classes/badges/special_xdpqpBv_9FYOZwN.png"
},
"image_url": "http://badges.example.com/media/issued/cd75b69fc1c979fcc1697c8403da2bdf.png",
"assertion_url": "http://badges.example.com/public/assertions/07020647-e772-44dd-98b7-d13d34335ca6"
},
...
]
}
"""
serializer_class = BadgeAssertionSerializer
authentication_classes = (
OAuth2AuthenticationAllowInactiveUser,
SessionAuthenticationAllowInactiveUser
)
permission_classes = (is_field_shared_factory("accomplishments_shared"),)
def filter_queryset(self, queryset):
"""
Return most recent to least recent badge.
"""
return queryset.order_by('-created')
def get_queryset(self):
"""
Get all badges for the username specified.
"""
queryset = BadgeAssertion.objects.filter(user__username=self.kwargs['username'])
provided_course_id = self.request.query_params.get('course_id')
if provided_course_id == '*':
# We might want to get all the matching course scoped badges to see how many courses
# a user managed to get a specific award on.
course_id = None
elif provided_course_id:
try:
course_id = CourseKey.from_string(provided_course_id)
except InvalidKeyError:
raise InvalidCourseKeyError
elif 'slug' not in self.request.query_params:
# Need to get all badges for the user.
course_id = None
else:
# Django won't let us use 'None' for querying a ForeignKey field. We have to use this special
# 'Empty' value to indicate we're looking only for badges without a course key set.
course_id = CourseKeyField.Empty
if course_id is not None:
queryset = queryset.filter(badge_class__course_id=course_id)
if self.request.query_params.get('slug'):
queryset = queryset.filter(
badge_class__slug=self.request.query_params['slug'],
badge_class__issuing_component=self.request.query_params.get('issuing_component', '')
)
return queryset
|
teltek/edx-platform
|
lms/djangoapps/badges/api/views.py
|
Python
|
agpl-3.0
| 6,051
| 0.003966
|
"""Imports for Python API.
This file is MACHINE GENERATED! Do not edit.
Generated by: tensorflow/tools/api/generator/create_python_api.py script.
"""
from tensorflow.python.platform.resource_loader import get_data_files_path
from tensorflow.python.platform.resource_loader import get_path_to_datafile
from tensorflow.python.platform.resource_loader import get_root_dir_with_all_resources
from tensorflow.python.platform.resource_loader import load_resource
from tensorflow.python.platform.resource_loader import readahead_file_path
|
ryfeus/lambda-packs
|
Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/resource_loader/__init__.py
|
Python
|
mit
| 532
| 0.003759
|
import sst
import sst.actions
# tests for simulate_keys
sst.actions.set_base_url('http://localhost:%s/' % sst.DEVSERVER_PORT)
sst.actions.go_to('/')
sst.actions.assert_title('The Page Title')
sst.actions.write_textfield('text_1', 'Foobar..')
sst.actions.simulate_keys('text_1', 'BACK_SPACE')
sst.actions.simulate_keys('text_1', 'back_space') # not case sensitive
sst.actions.simulate_keys('text_1', 'SPACE')
sst.actions.simulate_keys('text_1', 'Space')
sst.actions.assert_text('text_1', 'Foobar ')
# available keys (from selenium/webdriver/common/keys.py):
#
# 'NULL'
# 'CANCEL'
# 'HELP'
# 'BACK_SPACE'
# 'TAB'
# 'CLEAR'
# 'RETURN'
# 'ENTER'
# 'SHIFT'
# 'LEFT_SHIFT'
# 'CONTROL'
# 'LEFT_CONTROL'
# 'ALT'
# 'LEFT_ALT'
# 'PAUSE'
# 'ESCAPE'
# 'SPACE'
# 'PAGE_UP'
# 'PAGE_DOWN'
# 'END'
# 'HOME'
# 'LEFT'
# 'ARROW_LEFT'
# 'UP'
# 'ARROW_UP'
# 'RIGHT'
# 'ARROW_RIGHT'
# 'DOWN'
# 'ARROW_DOWN'
# 'INSERT'
# 'DELETE'
# 'SEMICOLON'
# 'EQUALS'
# 'NUMPAD0'
# 'NUMPAD1'
# 'NUMPAD2'
# 'NUMPAD3'
# 'NUMPAD4'
# 'NUMPAD5'
# 'NUMPAD6'
# 'NUMPAD7'
# 'NUMPAD8'
# 'NUMPAD9'
# 'MULTIPLY'
# 'ADD'
# 'SEPARATOR'
# 'SUBTRACT'
# 'DECIMAL'
# 'DIVIDE'
# 'F1'
# 'F2'
# 'F3'
# 'F4'
# 'F5'
# 'F6'
# 'F7'
# 'F8'
# 'F9'
# 'F10'
# 'F11'
# 'F12'
# 'META'
# 'COMMAND'
|
DramaFever/sst
|
src/sst/selftests/keys.py
|
Python
|
apache-2.0
| 1,307
| 0
|
# DO NOT EDIT THIS FILE!
#
# Python module CosTradingDynamic generated by omniidl
import omniORB
omniORB.updateModule("CosTradingDynamic")
# ** 1. Stub files contributing to this module
import CosTradingDynamic_idl
# ** 2. Sub-modules
# ** 3. End
|
amonmoce/corba_examples
|
omniORBpy-4.2.1/build/python/COS/CosTradingDynamic/__init__.py
|
Python
|
mit
| 251
| 0.003984
|
# -*- coding: utf-8 -*-
__author__ = 'massimo'
import unittest
class UtilTestFunctions(unittest.TestCase):
def test_verify_url(self):
self.assertTrue(True)
if __name__ == "__main__":
    unittest.main()
|
yangsibai/SiteMiner
|
test/util_test.py
|
Python
|
mit
| 229
| 0.004367
|
from __future__ import unicode_literals
from django.db import models
from wagtail.core.models import Site
class SystemString(models.Model):
DEFAULT_GROUP = 'general'
identifier = models.CharField(max_length=1024)
string = models.CharField(max_length=1024, blank=True, null=True)
group = models.CharField(max_length=255, default=DEFAULT_GROUP)
site = models.ForeignKey(Site, on_delete=models.CASCADE)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(null=True, blank=True)
accessed = models.DateTimeField(null=True, blank=True)
modified = models.BooleanField(default=False)
class Meta:
unique_together = ['identifier', 'site', 'group']
    def __str__(self):
        return self.identifier
|
Frojd/wagtail-systemtext
|
wagtailsystemtext/models.py
|
Python
|
mit
| 791
| 0
|
"""
Siren protocol adapter. See `SIREN specification <https://github.com/kevinswiber/siren>`_.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from ripozo.adapters import AdapterBase
from ripozo.utilities import titlize_endpoint
from ripozo.resources.resource_base import create_url
from ripozo.resources.constants import input_categories
import json
import six
_CONTENT_TYPE = 'application/vnd.siren+json'
class SirenAdapter(AdapterBase):
"""
An adapter that formats the response in the SIREN format.
A description of a SIREN format can be found here:
`SIREN specification <https://github.com/kevinswiber/siren>`_
"""
formats = [_CONTENT_TYPE, 'siren']
extra_headers = {'Content-Type': _CONTENT_TYPE}
@property
def formatted_body(self):
"""
Gets the formatted body of the response in unicode form.
If ``self.status_code == 204`` then this will
return an empty string.
:return: The siren formatted response body
:rtype: unicode
"""
# 204's are supposed to be empty responses
if self.status_code == 204:
return ''
links = self.generate_links()
entities = self.get_entities()
response = dict(properties=self.resource.properties, actions=self._actions,
links=links, entities=entities)
# need to do this separately since class is a reserved keyword
response['class'] = [self.resource.resource_name]
return json.dumps(response)
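    # Shape sketch (illustrative, not part of the original file): for a resource
    # with a single property and no related entities, formatted_body produces
    # JSON along these lines (the values here are made up):
    #
    #   {
    #     "class": ["resource_name"],
    #     "properties": {"id": 1},
    #     "actions": [...],
    #     "links": [{"rel": ["self"], "href": "/api/resource_name/1"}],
    #     "entities": []
    #   }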
@property
def _actions(self):
"""
Gets the list of actions in an appropriately SIREN format
:return: The list of actions
:rtype: list
"""
actions = []
for endpoint, options in six.iteritems(self.resource.endpoint_dictionary()):
options = options[0]
all_methods = options.get('methods', ('GET',))
meth = all_methods[0] if all_methods else 'GET'
base_route = options.get('route', self.resource.base_url)
route = create_url(base_route, **self.resource.properties)
route = self.combine_base_url_with_resource_url(route)
fields = self.generate_fields_for_endpoint_funct(options.get('endpoint_func'))
actn = dict(name=endpoint, title=titlize_endpoint(endpoint),
method=meth, href=route, fields=fields)
actions.append(actn)
return actions
def generate_fields_for_endpoint_funct(self, endpoint_func):
"""
Returns the action's fields attribute in a SIREN
appropriate format.
:param apimethod endpoint_func:
:return: A dictionary of action fields
:rtype: dict
"""
return_fields = []
fields_method = getattr(endpoint_func, 'fields', None)
if not fields_method:
return []
fields = fields_method(self.resource.manager)
for field in fields:
if field.arg_type is input_categories.URL_PARAMS:
continue
field_dict = dict(name=field.name, type=field.field_type.__name__,
location=field.arg_type, required=field.required)
return_fields.append(field_dict)
return return_fields
def generate_links(self):
"""
Generates the Siren links for the resource.
:return: The list of Siren formatted links.
:rtype: list
"""
href = self.combine_base_url_with_resource_url(self.resource.url)
links = [dict(rel=['self'], href=href)]
for link, link_name, embedded in self.resource.linked_resources:
links.append(dict(rel=[link_name],
href=self.combine_base_url_with_resource_url(link.url)))
return links
def get_entities(self):
"""
Gets a list of related entities in an appropriate SIREN format
:return: A list of entities
:rtype: list
"""
entities = []
for resource, name, embedded in self.resource.related_resources:
for ent in self.generate_entity(resource, name, embedded):
entities.append(ent)
return entities
def generate_entity(self, resource, name, embedded):
"""
A generator that yields entities
"""
if isinstance(resource, list):
for res in resource:
for ent in self.generate_entity(res, name, embedded):
yield ent
else:
if not resource.has_all_pks:
return
ent = {'class': [resource.resource_name], 'rel': [name]}
resource_url = self.combine_base_url_with_resource_url(resource.url)
if not embedded:
ent['href'] = resource_url
else:
ent['properties'] = resource.properties
ent['links'] = [dict(rel=['self'], href=resource_url)]
yield ent
@classmethod
def format_exception(cls, exc):
"""
Takes an exception and appropriately formats it
in the siren format. Mostly. It doesn't return
a self in this circumstance.
:param Exception exc: The exception to format.
:return: A tuple containing: response body, format,
http response code
:rtype: tuple
"""
status_code = getattr(exc, 'status_code', 500)
body = {'class': ['exception', exc.__class__.__name__],
'actions': [], 'entities': [], 'links': [],
'properties': dict(status=status_code, message=six.text_type(exc))}
return json.dumps(body), cls.formats[0], status_code
@classmethod
def format_request(cls, request):
"""
Simply returns request
:param RequestContainer request: The request to handler
:rtype: RequestContainer
"""
return request
|
vertical-knowledge/ripozo
|
ripozo/adapters/siren.py
|
Python
|
gpl-2.0
| 6,055
| 0.001156
|
from PyQt4 import QtCore
from PyQt4 import QtGui
from Action import Speech
from UI.ActionPushButton import ActionPushButton
class BaseStudy(QtGui.QWidget):
def __init__(self):
super(BaseStudy, self).__init__()
self._actionQueue = None
self._nao = None
self._widgets = None
self._buttons = None
#END __init__()
def LEDActive(self):
if self._nao is not None:
self._nao.LEDrandomEyes(1.0, True)
#END if
#END LEDActive()
def LEDNormal(self):
if self._nao is not None:
self._nao.LEDNormal()
#END if
#END LEDNormal()
def setActionQueue(self, actionQueue):
self._actionQueue = actionQueue
#END setActionQueue()
def setNao(self, nao):
if self._nao is not None:
self._nao.connected.disconnect(self.on_nao_connected)
self._nao.disconnected.disconnect(self.on_nao_disconnected)
#END if
self._nao = nao
if self._nao is not None:
self._nao.connected.connect(self.on_nao_connected)
self._nao.disconnected.connect(self.on_nao_disconnected)
#END if
#END setNao()
def speech(self, txt, speed, shaping):
return None
#END speech()
def on_button_clicked(self):
if self._actionQueue is not None:
self._actionQueue.addActions(self.sender().getRobotActions())
#END if
#END on_button_clicked()
def on_nao_connected(self):
pass
#END on_nao_connected()
def on_nao_disconnected(self):
pass
#END on_nao_disconnected()
def on_runSpeech_clicked(self):
if self._actionQueue is not None:
self._actionQueue.addActions(self.sender().getRobotActions())
#END if
#END on_runSpeech_clicked()
def _setupUi(self, general_panel = True, custom_widget = None):
wgtGeneral = None
if general_panel:
wgtGeneral = QtGui.QWidget()
wgtGeneral.setMaximumHeight(80)
wgtGeneral.setMinimumHeight(80)
##################################################
# General Speech
##################################################
self._speechs = [
ActionPushButton(None, "Hello", Speech("Hello")),
ActionPushButton(None, "Thanks", Speech("Thank you")),
ActionPushButton(None, "Sorry", Speech("I'm sorry")),
ActionPushButton(None, "Good", Speech("Good!")),
ActionPushButton(None, "Okay", Speech("Okay")),
ActionPushButton(None, "Yes", Speech("Yes")),
ActionPushButton(None, "No", Speech("No")),
ActionPushButton(None, "Hmmm", Speech("Heum,")),
None,
ActionPushButton(None, "Louder", Speech("Please speak louder")),
ActionPushButton(None, "Say again?", Speech("Can you say one more time?")),
ActionPushButton(None, "Repeat?", Speech("Would you like me to repeat that?")),
ActionPushButton(None, "Understood?", Speech("Do you understand?")),
ActionPushButton(None, "Don't Understand", Speech("I don't understand")),
ActionPushButton(None, "Greeting", Speech("Hello, my name is NAO, nice to meet you")),
ActionPushButton(None, "End Experiment", Speech("Thank you for participating in our experiment")),
]
self._grpSpeech = QtGui.QGroupBox(wgtGeneral)
self._grpSpeech.setTitle("General Speech")
layoutSpeech = QtGui.QVBoxLayout(self._grpSpeech)
layoutSpeech.setMargin(6)
layoutSpeech.addSpacing(3)
widget = QtGui.QWidget(self._grpSpeech)
layout = QtGui.QHBoxLayout(widget)
layout.setMargin(0)
for item in self._speechs:
if item is None:
layoutSpeech.addWidget(widget)
widget = QtGui.QWidget(self._grpSpeech)
layout = QtGui.QHBoxLayout(widget)
layout.setMargin(0)
else:
item.setParent(widget)
item.clicked.connect(self.on_runSpeech_clicked)
layout.addWidget(item)
#END if
#END for
layoutSpeech.addWidget(widget)
#END if
wgtButtons = None
if self._widgets is not None and self._buttons is not None:
wgtButtons = QtGui.QWidget()
layout = QtGui.QHBoxLayout(wgtButtons)
layout.setMargin(0)
for i in range(len(self._widgets)):
layoutButtons = QtGui.QVBoxLayout(self._widgets[i])
layoutButtons.setMargin(0)
for button in self._buttons[i]:
if isinstance(button, ActionPushButton):
button.clicked.connect(self.on_button_clicked)
#END if
layoutButtons.addWidget(button)
#END for
scroll = QtGui.QScrollArea()
scroll.setAlignment(QtCore.Qt.AlignCenter)
scroll.setWidget(self._widgets[i])
layoutScroll = QtGui.QHBoxLayout()
layoutScroll.setMargin(0)
layoutScroll.addWidget(scroll)
layout.addLayout(layoutScroll)
#END for
#END if
if wgtGeneral is not None or wgtButtons is not None or custom_widget is not None:
splitter = QtGui.QSplitter(self)
splitter.setOrientation(QtCore.Qt.Vertical)
layout = QtGui.QHBoxLayout(self)
layout.setMargin(0)
layout.addWidget(splitter)
if wgtGeneral is not None:
wgtGeneral.setParent(splitter)
#END if
if wgtButtons is not None:
wgtButtons.setParent(splitter)
#END if
if custom_widget is not None:
custom_widget.setParent(splitter)
#END if
#END if
#END _setupUi()
#END BaseStudy
|
mattBrzezinski/Hydrogen
|
robot-controller/Study/BaseStudy.py
|
Python
|
mit
| 6,141
| 0.006676
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Function/method decorators that provide timeout and retry logic.
"""
import functools
import itertools
import sys
from devil.android import device_errors
from devil.utils import cmd_helper
from devil.utils import reraiser_thread
from devil.utils import timeout_retry
DEFAULT_TIMEOUT_ATTR = '_default_timeout'
DEFAULT_RETRIES_ATTR = '_default_retries'
def _TimeoutRetryWrapper(f, timeout_func, retries_func, pass_values=False):
""" Wraps a funcion with timeout and retry handling logic.
Args:
f: The function to wrap.
timeout_func: A callable that returns the timeout value.
retries_func: A callable that returns the retries value.
pass_values: If True, passes the values returned by |timeout_func| and
|retries_func| to the wrapped function as 'timeout' and
'retries' kwargs, respectively.
Returns:
The wrapped function.
"""
@functools.wraps(f)
def timeout_retry_wrapper(*args, **kwargs):
timeout = timeout_func(*args, **kwargs)
retries = retries_func(*args, **kwargs)
if pass_values:
kwargs['timeout'] = timeout
kwargs['retries'] = retries
@functools.wraps(f)
def impl():
return f(*args, **kwargs)
try:
if timeout_retry.CurrentTimeoutThreadGroup():
# Don't wrap if there's already an outer timeout thread.
return impl()
else:
desc = '%s(%s)' % (f.__name__, ', '.join(itertools.chain(
(str(a) for a in args),
('%s=%s' % (k, str(v)) for k, v in kwargs.iteritems()))))
return timeout_retry.Run(impl, timeout, retries, desc=desc)
except reraiser_thread.TimeoutError as e:
raise device_errors.CommandTimeoutError(str(e)), None, (
sys.exc_info()[2])
except cmd_helper.TimeoutError as e:
raise device_errors.CommandTimeoutError(str(e)), None, (
sys.exc_info()[2])
return timeout_retry_wrapper
def WithTimeoutAndRetries(f):
"""A decorator that handles timeouts and retries.
'timeout' and 'retries' kwargs must be passed to the function.
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
get_timeout = lambda *a, **kw: kw['timeout']
get_retries = lambda *a, **kw: kw['retries']
return _TimeoutRetryWrapper(f, get_timeout, get_retries)
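# Usage sketch (illustrative, not part of the original module): a function
# decorated with WithTimeoutAndRetries must be called with explicit 'timeout'
# and 'retries' keyword arguments; the names below are placeholders.
#
#   @WithTimeoutAndRetries
#   def FlakyOperation(arg, timeout=None, retries=None):
#     ...
#
#   FlakyOperation('foo', timeout=30, retries=3)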
def WithExplicitTimeoutAndRetries(timeout, retries):
"""Returns a decorator that handles timeouts and retries.
The provided |timeout| and |retries| values are always used.
Args:
timeout: The number of seconds to wait for the decorated function to
return. Always used.
retries: The number of times the decorated function should be retried on
failure. Always used.
Returns:
The actual decorator.
"""
def decorator(f):
get_timeout = lambda *a, **kw: timeout
get_retries = lambda *a, **kw: retries
return _TimeoutRetryWrapper(f, get_timeout, get_retries)
return decorator
def WithTimeoutAndRetriesDefaults(default_timeout, default_retries):
"""Returns a decorator that handles timeouts and retries.
The provided |default_timeout| and |default_retries| values are used only
if timeout and retries values are not provided.
Args:
default_timeout: The number of seconds to wait for the decorated function
to return. Only used if a 'timeout' kwarg is not passed
to the decorated function.
default_retries: The number of times the decorated function should be
retried on failure. Only used if a 'retries' kwarg is not
passed to the decorated function.
Returns:
The actual decorator.
"""
def decorator(f):
get_timeout = lambda *a, **kw: kw.get('timeout', default_timeout)
get_retries = lambda *a, **kw: kw.get('retries', default_retries)
return _TimeoutRetryWrapper(f, get_timeout, get_retries, pass_values=True)
return decorator
def WithTimeoutAndRetriesFromInstance(
default_timeout_name=DEFAULT_TIMEOUT_ATTR,
default_retries_name=DEFAULT_RETRIES_ATTR,
min_default_timeout=None):
"""Returns a decorator that handles timeouts and retries.
The provided |default_timeout_name| and |default_retries_name| are used to
get the default timeout value and the default retries value from the object
instance if timeout and retries values are not provided.
Note that this should only be used to decorate methods, not functions.
Args:
default_timeout_name: The name of the default timeout attribute of the
instance.
default_retries_name: The name of the default retries attribute of the
instance.
    min_default_timeout: Minimum timeout to be used when using instance timeout.
Returns:
The actual decorator.
"""
def decorator(f):
def get_timeout(inst, *_args, **kwargs):
ret = getattr(inst, default_timeout_name)
if min_default_timeout is not None:
ret = max(min_default_timeout, ret)
return kwargs.get('timeout', ret)
def get_retries(inst, *_args, **kwargs):
return kwargs.get('retries', getattr(inst, default_retries_name))
return _TimeoutRetryWrapper(f, get_timeout, get_retries, pass_values=True)
return decorator
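# Usage sketch (illustrative, not part of the original module): the instance
# variant reads its defaults from attributes on self, so a class using it might
# look like the placeholder below. The attribute names match
# DEFAULT_TIMEOUT_ATTR / DEFAULT_RETRIES_ATTR defined above.
#
#   class DeviceWrapper(object):
#     _default_timeout = 30
#     _default_retries = 3
#
#     @WithTimeoutAndRetriesFromInstance()
#     def RebootDevice(self, timeout=None, retries=None):
#       ...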
|
Teamxrtc/webrtc-streaming-node
|
third_party/webrtc/src/chromium/src/build/android/devil/android/decorators.py
|
Python
|
mit
| 5,401
| 0.007036
|
# -*- coding: utf-8 -*-
"""
Django settings for blog project.
Generated by 'django-admin startproject' using Django 1.11.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2y8c!z@9!s%(0=epe+2n3k8+_!$vg--xz^3p4rs)6ov)c^2a*t'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blogadmin',
'DjangoUeditor',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
#
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'blog', # app's database name
# 'USER': 'root',
# 'PASSWORD': 'root',
# 'HOST': '127.0.0.1', # localhost or cloudhost
# 'PORT': '3306',
# },
# 'another_db':{
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'app2_database', # app2's database name
# 'USER': 'root',
# 'PASSWORD': 'root',
# 'HOST': '127.0.0.1', # localhost or cloudhost
# 'PORT': '3306',
# },
# }
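# (Editor's note: if the commented-out multi-database block above were enabled,
# a DATABASE_ROUTERS setting would normally be added as well so reads and
# writes are directed to the intended database.)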
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR,'static')
# Shared static files directory
STATIC_DIRS = (
os.path.join(BASE_DIR,'static'),
os.path.join(BASE_DIR,'media'),
)
# Directory for uploaded media files
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
STATICFILES_FINDERS = ("django.contrib.staticfiles.finders.FileSystemFinder", "django.contrib.staticfiles.finders.AppDirectoriesFinder",)
|
baike21/blog
|
blog/settings.py
|
Python
|
gpl-3.0
| 4,119
| 0.002444
|
#!/usr/bin/env python
"""
Largest product in a grid
Problem 11
Published on 22 February 2002 at 06:00 pm [Server Time]
In the 20x20 grid below, four numbers along a diagonal line have been marked in red.
The product of these numbers is 26 * 63 * 78 * 14 = 1788696.
What is the greatest product of four adjacent numbers in the same direction (up, down, left, right, or diagonally) in the 20x20 grid?
"""
THE_GRID = [[int(column) for column in row.split(' ')] for row in
"""
08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
""".strip().split('\n')]
"""
A few words about the declaration of THE_GRID:
This is not the easiest thing to digest on first look. I think it is "pythonic"
in its implementation and it allows you to copy/paste the grid straight out of the problem
statement without a bunch of mucking around to manually turn it into a 2d array
( or nested lists, actually ). It is arranged as a list of rows. Each row is a
list of numbers for each column in that row. Looking at it, the multi-line string
definition actually converts to a list of strings from the split operation. One
string for each row. The top list comprehension converts each row into a list of
short strings ( the columns ) which are also converted to int.
"""
#------------------------------------------------------------------------------
import operator
#------------------------------------------------------------------------------
def product(iterable):
return reduce(operator.mul, iterable, 1)
def solve(run_length):
height = len(THE_GRID)
width = len(THE_GRID[0])
for row in range(height-run_length+1):
for column in range(width-run_length+1):
for y_dir in (0, 1):
for x_dir in (0,1):
for i in range(run_length):
print THE_GRID[row+(y_dir*i)][column+x_dir*i]
def solve(run_length):
height = len(THE_GRID)
width = len(THE_GRID[0])
for row in range(height-run_length+1):
for column in range(width-run_length+1):
for i in range(run_length):
for y_dir in (0, 1):
for x_dir in (0,1):
print THE_GRID[row+(y_dir*i)][column+x_dir*i]
def solve(run_length):
height = len(THE_GRID)
width = len(THE_GRID[0])
highest = 0
for row in range(height-run_length+1):
for column in range(width-run_length+1):
for x_dir, y_dir in [(1, 0), (0, 1), (1, 1)]:
for i in range(run_length):
print THE_GRID[row+(y_dir*i)][column+x_dir*i]
def solve(run_length):
height = len(THE_GRID)
width = len(THE_GRID[0])
highest = 0
for row in range(height-run_length+1):
for column in range(width-run_length+1):
for x_dir, y_dir in [(1, 0), (0, 1), (1, 1)]:
run =[THE_GRID[row+(y_dir*i)][column+x_dir*i] for i in range(run_length)]
result = product(run)
print run, result
#if result > highest:
# highest = result
#return(highest)
#------------------------------------------------------------------------------
def solve():
g = THE_GRID
maxp = 0
    rows, cols, path_size = len(g), len(g[0]), 4  # four adjacent numbers, per the problem statement
for i in range(rows):
for j in range(cols - path_size + 1):
phv = max(product([g[i][j+s] for s in range(path_size)]),
product([g[j+s][i] for s in range(path_size)]))
#phv = max(g[i][j] * g[i][j+1] * g[i][j+2] * g[i][j+3],
# g[j][i] * g[j+1][i] * g[j+2][i] * g[j+3][i])
            if i <= rows - path_size:  # include the last valid diagonal start row
pdd = max(product([g[i+s][j+s] for s in range(path_size)]),
product([g[i+s][j+path_size-s-1] for s in range(path_size)]))
#pdd = max(g[i][j] * g[i+1][j+1] * g[i+2][j+2] * g[i+3][j+3],
# g[i][j+3] * g[i+1][j+2] * g[i+2][j+1] * g[i+3][j])
maxp = max(maxp, phv, pdd)
return maxp
#------------------------------------------------------------------------------
def main():
print "PROBLEM:\n"
for line in __doc__.strip().split('\n'):
print '\t', line
print "\nSOLUTION:"
print "\n\t", solve()
#------------------------------------------------------------------------------
if __name__ == "__main__":
main()
|
slowkid/EulerProject
|
solutions/problem11.py
|
Python
|
unlicense
| 5,822
| 0.009275
|
#!/usr/bin/env python
import json
my_list = range(10)
my_list.append('whatever')
my_list.append('some thing')
my_dict = {
'key1': 'val1',
'key2': 'val2',
'key3': 'val3'
}
my_dict['key4'] = my_list
my_dict['key5'] = False
print my_dict
print json.dumps(my_dict)
with open("my_file.json", "w") as f:
json.dump(my_dict, f)
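# Editor's sketch (not in the original script): reading the file back with
# json.load to show the round trip; note that JSON deserialization gives the
# dict back with unicode string keys.
with open("my_file.json") as f:
    data_back = json.load(f)
print data_back['key5']   # False
print data_back['key4']   # the list written above, now a plain list again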
|
ktbyers/pynet-ons-mar17
|
json_yaml/json_test.py
|
Python
|
apache-2.0
| 335
| 0
|
"""This submodule contains objects for handling different representations of models and model functions.
:synopsis: This submodule contains objects for handling different representations of models and model functions.
.. moduleauthor:: Johannes Gaessler <johannes.gaessler@student.kit.edu>
"""
from ._base import *
from .yaml_drepr import *
|
dsavoiu/kafe2
|
kafe2/fit/representation/model/__init__.py
|
Python
|
gpl-3.0
| 353
| 0.005666
|
from pygfa.graph_element.parser import line, field_validator as fv
SERIALIZATION_ERROR_MESSAGGE = "Couldn't serialize object identified by: "
def _format_exception(identifier, exception):
return SERIALIZATION_ERROR_MESSAGGE + identifier \
+ "\n\t" + repr(exception)
def _remove_common_edge_fields(edge_dict):
edge_dict.pop('eid')
edge_dict.pop('from_node')
edge_dict.pop('from_orn')
edge_dict.pop('to_node')
edge_dict.pop('to_orn')
edge_dict.pop('from_positions')
edge_dict.pop('to_positions')
edge_dict.pop('alignment')
edge_dict.pop('distance')
edge_dict.pop('variance')
def _serialize_opt_fields(opt_fields):
fields = []
for key, opt_field in opt_fields.items():
if line.is_optfield(opt_field):
fields.append(str(opt_field))
return fields
def _are_fields_defined(fields):
try:
for field in fields:
if field is None:
return False
except:
return False
return True
def _check_fields(fields, required_fields):
"""Check if each field has the correct format as
stated from the specification.
"""
try:
for field in range(0, len(required_fields)):
if not fv.is_valid(fields[field], required_fields[field]):
return False
return True
except:
return False
def _check_identifier(identifier):
if not isinstance(identifier, str):
identifier = "'{0}' - id of type {1}.".format(\
str(identifier), \
type(identifier) \
)
return identifier
|
AlgoLab/pygfa
|
pygfa/serializer/utils.py
|
Python
|
mit
| 1,690
| 0.006509
|
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^$', TemplateView.as_view(template_name='base.html'))
)
|
SheepDogInc/django-base
|
project_name/urls.py
|
Python
|
bsd-3-clause
| 294
| 0.003401
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for building input pipelines.
This module contains experimental `Dataset` sources and transformations that can
be used in conjunction with the `tf.data.Dataset` API. Note that the
`tf.contrib.data` API is not subject to the same backwards compatibility
guarantees as `tf.data`, but we will provide deprecation advice in advance of
removing existing functionality.
See [Importing Data](https://tensorflow.org/guide/datasets) for an overview.
@@Counter
@@CheckpointInputPipelineHook
@@CsvDataset
@@LMDBDataset
@@Optional
@@RandomDataset
@@Reducer
@@SqlDataset
@@TFRecordWriter
@@assert_element_shape
@@batch_and_drop_remainder
@@bucket_by_sequence_length
@@choose_from_datasets
@@copy_to_device
@@dense_to_sparse_batch
@@enumerate_dataset
@@get_next_as_optional
@@get_single_element
@@group_by_reducer
@@group_by_window
@@ignore_errors
@@make_batched_features_dataset
@@make_csv_dataset
@@make_saveable_from_iterator
@@map_and_batch
@@padded_batch_and_drop_remainder
@@parallel_interleave
@@parse_example_dataset
@@prefetch_to_device
@@read_batch_features
@@rejection_resample
@@reduce_dataset
@@sample_from_datasets
@@scan
@@shuffle_and_repeat
@@sliding_window_batch
@@sloppy_interleave
@@unbatch
@@unique
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.data.python.ops.batching import assert_element_shape
from tensorflow.contrib.data.python.ops.batching import batch_and_drop_remainder
from tensorflow.contrib.data.python.ops.batching import dense_to_sparse_batch
from tensorflow.contrib.data.python.ops.batching import map_and_batch
from tensorflow.contrib.data.python.ops.batching import padded_batch_and_drop_remainder
from tensorflow.contrib.data.python.ops.batching import unbatch
from tensorflow.contrib.data.python.ops.counter import Counter
from tensorflow.contrib.data.python.ops.enumerate_ops import enumerate_dataset
from tensorflow.contrib.data.python.ops.error_ops import ignore_errors
from tensorflow.contrib.data.python.ops.get_single_element import get_single_element
from tensorflow.contrib.data.python.ops.get_single_element import reduce_dataset
from tensorflow.contrib.data.python.ops.grouping import bucket_by_sequence_length
from tensorflow.contrib.data.python.ops.grouping import group_by_reducer
from tensorflow.contrib.data.python.ops.grouping import group_by_window
from tensorflow.contrib.data.python.ops.grouping import Reducer
from tensorflow.contrib.data.python.ops.interleave_ops import choose_from_datasets
from tensorflow.contrib.data.python.ops.interleave_ops import parallel_interleave
from tensorflow.contrib.data.python.ops.interleave_ops import sample_from_datasets
from tensorflow.contrib.data.python.ops.interleave_ops import sloppy_interleave
from tensorflow.contrib.data.python.ops.iterator_ops import CheckpointInputPipelineHook
from tensorflow.contrib.data.python.ops.iterator_ops import make_saveable_from_iterator
from tensorflow.contrib.data.python.ops.parsing_ops import parse_example_dataset
from tensorflow.contrib.data.python.ops.prefetching_ops import copy_to_device
from tensorflow.contrib.data.python.ops.prefetching_ops import prefetch_to_device
from tensorflow.contrib.data.python.ops.random_ops import RandomDataset
from tensorflow.contrib.data.python.ops.readers import CsvDataset
from tensorflow.contrib.data.python.ops.readers import LMDBDataset
from tensorflow.contrib.data.python.ops.readers import make_batched_features_dataset
from tensorflow.contrib.data.python.ops.readers import make_csv_dataset
from tensorflow.contrib.data.python.ops.readers import read_batch_features
from tensorflow.contrib.data.python.ops.readers import SqlDataset
from tensorflow.contrib.data.python.ops.resampling import rejection_resample
from tensorflow.contrib.data.python.ops.scan_ops import scan
from tensorflow.contrib.data.python.ops.shuffle_ops import shuffle_and_repeat
from tensorflow.contrib.data.python.ops.sliding import sliding_window_batch
from tensorflow.contrib.data.python.ops.unique import unique
from tensorflow.contrib.data.python.ops.writers import TFRecordWriter
from tensorflow.python.data.ops.iterator_ops import get_next_as_optional
from tensorflow.python.data.ops.optional_ops import Optional
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
# A constant that can be used to enable auto-tuning.
AUTOTUNE = -1
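# Editor's sketch (not part of the original module): these transformations are
# typically applied to a Dataset via Dataset.apply(), for example
#   dataset = dataset.apply(
#       tf.contrib.data.shuffle_and_repeat(buffer_size=10000, count=5))
# Exact signatures may vary between TF 1.x releases.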
|
AnishShah/tensorflow
|
tensorflow/contrib/data/__init__.py
|
Python
|
apache-2.0
| 5,193
| 0.002696
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from telesto import plot
OFFSET = 1000 # REMEMBER TO CALCULATE MANUALLY!
def main():
path = sys.argv[1]
mean = plot.mean(
plot.rows(path, plot.parse_middleware_line),
("waiting", "parsing", "database", "response")
)
dev = plot.standard_deviation(
plot.rows(path, plot.parse_middleware_line), mean
)
print "#",
for key in mean:
print key,
print
for key in mean:
print mean[key],
print
for key in mean:
print 100.0 * mean[key] / sum(mean.itervalues()),
print
for key in mean:
print dev[key],
print
if __name__ == "__main__":
main()
|
dola/Telesto
|
tools/protocol/telesto/plot/phases.py
|
Python
|
mit
| 718
| 0.001393
|
def len2mask(len):
"""Convert a bit length to a dotted netmask (aka. CIDR to netmask)"""
mask = ''
if not isinstance(len, int) or len < 0 or len > 32:
print "Illegal subnet length: %s (which is a %s)" % \
(str(len), type(len).__name__)
return None
for t in range(4):
if len > 7:
mask += '255.'
else:
dec = 255 - (2**(8 - len) - 1)
mask += str(dec) + '.'
len -= 8
if len < 0:
len = 0
return mask[:-1]
def mask2len(subnet):
"""Convert a dotted netmask to bit length (aka. netmask to CIDR)"""
octets = [int(x) for x in subnet.split(".")]
count = 0
for octet in octets:
highest_bit = 128
while highest_bit > 0:
if octet >= highest_bit:
count = count + 1
octet = octet - highest_bit
highest_bit = highest_bit / 2
else:
return count
return count
class FilterModule(object):
''' utility to convert cidr netmasks into len and reverse '''
def filters(self):
return {'mask2len': mask2len,
'len2mask': len2mask}
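# Editor's note: a few worked conversions, added only to illustrate the two
# helpers above; they are not part of the original plugin.
if __name__ == '__main__':
    # /24 keeps three full octets
    assert len2mask(24) == '255.255.255.0'
    # /20 leaves 4 high bits set in the third octet: 255 - (2**(8-4) - 1) = 240
    assert len2mask(20) == '255.255.240.0'
    # mask2len counts the set bits back out again
    assert mask2len('255.255.240.0') == 20
    assert mask2len('255.255.255.0') == 24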
|
kili/playbooks
|
filter_plugins/netmask_conversion.py
|
Python
|
gpl-3.0
| 1,194
| 0
|
# coding=utf-8
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "..")))
from db.MysqlUtil import initMysql, execute, select, batchInsert, disconnect
from common.JsonHelper import loadJsonConfig
from api.tushareApi import getSimpleHistoryData
from datetime import datetime, timedelta
from common.LoggerHelper import writeErrorLog, writeWarningLog, writeInfoLog, writeDebugLog, writeLog, writeExceptionLog
from wechat.weChatSender import sendMessageToMySelf
from common.HttpHelper import httpGet
from common.FileHelper import saveFile
import time
import json
# Fetch historical quote data (forward-adjusted) from 10jqka (Tonghuashun)
def updateStockHistoryInfoByTHS(stockList):
for stock in stockList:
code = stock[0]
i = 2010
thisYear = datetime.now().year
while (i <= thisYear):
# time.sleep(1)
infos = getStockInfos(code, i)
if infos is None:
continue
for date in infos:
open = infos.get(date).get('open')
close = infos.get(date).get('close')
high = infos.get(date).get('high')
low = infos.get(date).get('low')
volume = infos.get(date).get('volume')
amount = infos.get(date).get('amount')
checkExistSql = unicode("select count(*) from s_stock where code='{0}' and date='{1}'").format(code,
date)
count = select(checkExistSql, False)[0]
if count > 0:
updateSql = unicode(
"update s_stock set volume={2},highPrice={3},lowPrice={4},openPrice={5},closePrice={6},amount='{7}' where code='{0}' and date='{1}'").format(
code, date, volume, high, low, open, close, amount)
execute(updateSql)
print code, date, updateSql
else:
insertSql = unicode(
"insert into s_stock(code,date,timestamp,volume,highPrice,lowPrice,openPrice,closePrice,amount) VALUES ('{0}','{1}',{2},{3},{4},{5},{6},{7},'{8}')").format(
code, date, int(time.mktime(time.strptime(date, '%Y-%m-%d'))), volume, high, low, open, close,
amount)
execute(insertSql)
print code, date, insertSql
i = i + 1
# Parse one year of quote data (forward-adjusted) from 10jqka
def getStockInfos(code, year):
try:
url = "http://d.10jqka.com.cn/v2/line/hs_{0}/01/{1}.js".format(code, year)
res = httpGet(url).decode("utf-8")
index = res.find("(")
if (index < 0):
writeErrorLog(unicode("解析行情失败: code:{0}, year:{1}, res:{2}").format(code, year, res))
return []
res = res[index + 1:-1]
writeLog(unicode("获取股票历史行情: code: {0}, year:{1}").format(code, year))
jo = json.loads(res)
dataInfo = jo['data'].split(';')
result = {}
for item in dataInfo:
infos = item.split(',')
dic = {}
dic['open'] = infos[1]
dic['high'] = infos[2]
dic['low'] = infos[3]
dic['close'] = infos[4]
dic['volume'] = infos[5]
dic['amount'] = "{0}亿".format(round(float(infos[6]) / 100000000, 1))
result[datetime.strptime(infos[0], '%Y%m%d').strftime('%Y-%m-%d')] = dic
return result
except Exception, e:
writeErrorLog(unicode("解析行情失败: code:{0}, year:{1}, e:{2}").format(code, year, str(e)))
if "404" in str(e):
return []
else:
return None
def getStockHistoryInfoFromDb():
sql = unicode("SELECT code,count(*) from s_stock GROUP by code HAVING count(*)<20")
data = select(sql)
updateStockHistoryInfoByTHS(data)
def getStockHistoryInfoFromConfig():
stockList = loadJsonConfig(os.path.abspath(os.path.join(os.getcwd(), "../config/newStockList.json")))
updateStockHistoryInfoByTHS(stockList)
def updateAllStockHistoryInfo():
sql = unicode("select code,name from s_stock_info order by code asc")
data = select(sql)
updateStockHistoryInfoByTHS(data)
def updateStockOtherInfo():
sql = unicode("select code,name from s_stock_info order by code asc")
stockList = select(sql)
for stock in stockList:
code = stock[0]
if int(code) < 601126:
continue
selectInfoSql = unicode("select date,closePrice from s_stock where code='{0}' order by date asc").format(code)
data = select(selectInfoSql)
writeLog(unicode("更新股票其他指标数据: code: {0}").format(code))
updataStockBias(code, data, 6)
updataStockBias(code, data, 12)
updataStockBias(code, data, 24)
updateStockMA(code, data, 5)
updateStockMA(code, data, 10)
updateStockMA(code, data, 20)
updateStockMA(code, data, 30)
updateStockMA(code, data, 60)
updateStockMA(code, data, 120)
updateStockMA(code, data, 250)
updateStockChangePercent(code, data)
def updateStockChangePercent(code, data):
for i in range(1, len(data)):
try:
changeAmount = data[i][1] - data[i - 1][1]
changePercent = round(changeAmount * 100 / data[i - 1][1], 2)
updateSql = unicode(
"update s_stock set changePercent={0},changeAmount={1} where code='{2}' and date='{3}'").format(
changePercent, changeAmount, code, data[i][0])
execute(updateSql)
except Exception, e:
writeErrorLog(
unicode("更新涨幅数据失败: code:{0}, i:{1}, date:{2}, closePrice:{3}").format(code, i, data[i][0], data[i][1]))
def updateStockMA(code, data, n):
for i in range(n - 1, len(data)):
j = i
sum = 0
while (i - j < n):
sum = sum + data[j][1]
j = j - 1
avg = round(sum / n, 2)
sql = unicode("update s_stock set MA{0}={1} where code='{2}' and date='{3}'").format(n, avg, code, data[i][0])
execute(sql)
def updataStockBias(code, data, n):
for i in range(n - 1, len(data)):
j = i
sum = 0
while (i - j < n):
sum = sum + data[j][1]
j = j - 1
avg = round(sum / n, 2)
todayClosePrice = float(data[i][1])
bias = 0 if avg == 0 else round((todayClosePrice - avg) * 100 / avg, 2)
number = 1 if n == 6 else (2 if n == 12 else 3)
sql = unicode("update s_stock set BIAS{0}={1} where code='{2}' and date='{3}'").format(number, bias, code,
data[i][0])
execute(sql)
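# Editor's note (illustrative, not in the original): BIAS is the percentage
# deviation of the close from the n-day moving average. For example, with a
# 6-day average of 10.00 and a close of 10.50:
#   bias = (10.50 - 10.00) * 100 / 10.00 = 5.0
# and 'number' maps n = 6 / 12 / 24 to the BIAS1 / BIAS2 / BIAS3 columns.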
def main(argv):
try:
reload(sys)
sys.setdefaultencoding('utf-8')
# sendMessageToMySelf(unicode("开始查询股票历史行情数据"))
begin = datetime.now()
initMysql()
# getStockHistoryInfoFromDb()
# getStockHistoryInfoFromConfig()
updateStockOtherInfo()
disconnect()
end = datetime.now()
message = unicode("查询股票历史行情数据的任务执行完毕,当前时间:{0},执行用时:{1}").format(datetime.now(), end - begin)
writeLog(message)
sendMessageToMySelf(message)
except:
writeExceptionLog('RealTimeRemindTask Error.')
if __name__ == '__main__':
main(sys.argv)
|
zwffff2015/stock
|
task/GetStockHistoryInfoTask.py
|
Python
|
mit
| 7,619
| 0.004191
|
from . import *
offline_providers = {
'builtin': builtin.ProviderBuiltin,
}
|
luskaner/wps-dict
|
wps_dict/wps_dict/providers/offline/list.py
|
Python
|
gpl-3.0
| 81
| 0
|
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2011 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
    See the file LICENSE for copying permission.
"""
import logging
import sleekxmpp
from sleekxmpp.stanza import Message
from sleekxmpp.xmlstream.handler import Callback
from sleekxmpp.xmlstream.matcher import StanzaPath
from sleekxmpp.xmlstream import register_stanza_plugin, ElementBase, ET
from sleekxmpp.plugins import BasePlugin
from sleekxmpp.plugins.xep_0085 import stanza, ChatState
log = logging.getLogger(__name__)
class XEP_0085(BasePlugin):
"""
XEP-0085 Chat State Notifications
"""
name = 'xep_0085'
description = 'XEP-0085: Chat State Notifications'
dependencies = set(['xep_0030'])
stanza = stanza
def plugin_init(self):
self.xmpp.register_handler(
Callback('Chat State',
StanzaPath('message/chat_state'),
self._handle_chat_state))
register_stanza_plugin(Message, stanza.Active)
register_stanza_plugin(Message, stanza.Composing)
register_stanza_plugin(Message, stanza.Gone)
register_stanza_plugin(Message, stanza.Inactive)
register_stanza_plugin(Message, stanza.Paused)
def plugin_end(self):
self.xmpp.remove_handler('Chat State')
def session_bind(self, jid):
self.xmpp.plugin['xep_0030'].add_feature(ChatState.namespace)
def _handle_chat_state(self, msg):
state = msg['chat_state']
log.debug("Chat State: %s, %s", state, msg['from'].jid)
self.xmpp.event('chatstate_%s' % state, msg)
|
tiancj/emesene
|
emesene/e3/xmpp/SleekXMPP/sleekxmpp/plugins/xep_0085/chat_states.py
|
Python
|
gpl-3.0
| 1,636
| 0
|
"""Support to interface with Sonos players."""
import asyncio
import datetime
import functools as ft
import logging
import socket
import time
import urllib
import async_timeout
import pysonos
import pysonos.snapshot
from pysonos.exceptions import SoCoUPnPException, SoCoException
from homeassistant.components.media_player import MediaPlayerDevice
from homeassistant.components.media_player.const import (
ATTR_MEDIA_ENQUEUE, MEDIA_TYPE_MUSIC, SUPPORT_CLEAR_PLAYLIST,
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK, SUPPORT_SELECT_SOURCE,
SUPPORT_SHUFFLE_SET, SUPPORT_STOP, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET)
from homeassistant.const import (
ENTITY_MATCH_ALL, STATE_IDLE, STATE_PAUSED, STATE_PLAYING)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.util.dt import utcnow
from . import (
CONF_ADVERTISE_ADDR, CONF_HOSTS, CONF_INTERFACE_ADDR,
DATA_SERVICE_EVENT, DOMAIN as SONOS_DOMAIN,
ATTR_ALARM_ID, ATTR_ENABLED, ATTR_INCLUDE_LINKED_ZONES, ATTR_MASTER,
ATTR_NIGHT_SOUND, ATTR_SLEEP_TIME, ATTR_SPEECH_ENHANCE, ATTR_TIME,
ATTR_VOLUME, ATTR_WITH_GROUP,
SERVICE_CLEAR_TIMER, SERVICE_JOIN, SERVICE_RESTORE, SERVICE_SET_OPTION,
SERVICE_SET_TIMER, SERVICE_SNAPSHOT, SERVICE_UNJOIN, SERVICE_UPDATE_ALARM)
_LOGGER = logging.getLogger(__name__)
PARALLEL_UPDATES = 0
DISCOVERY_INTERVAL = 60
# Quiet down pysonos logging to just actual problems.
logging.getLogger('pysonos').setLevel(logging.WARNING)
logging.getLogger('pysonos.data_structures_entry').setLevel(logging.ERROR)
SUPPORT_SONOS = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE |\
SUPPORT_PLAY | SUPPORT_PAUSE | SUPPORT_STOP | SUPPORT_SELECT_SOURCE |\
SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_SEEK |\
SUPPORT_PLAY_MEDIA | SUPPORT_SHUFFLE_SET | SUPPORT_CLEAR_PLAYLIST
DATA_SONOS = 'sonos_media_player'
SOURCE_LINEIN = 'Line-in'
SOURCE_TV = 'TV'
ATTR_SONOS_GROUP = 'sonos_group'
UPNP_ERRORS_TO_IGNORE = ['701', '711', '712']
class SonosData:
"""Storage class for platform global data."""
def __init__(self, hass):
"""Initialize the data."""
self.entities = []
self.topology_condition = asyncio.Condition()
async def async_setup_platform(hass,
config,
async_add_entities,
discovery_info=None):
"""Set up the Sonos platform. Obsolete."""
_LOGGER.error(
'Loading Sonos by media_player platform config is no longer supported')
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Sonos from a config entry."""
if DATA_SONOS not in hass.data:
hass.data[DATA_SONOS] = SonosData(hass)
config = hass.data[SONOS_DOMAIN].get('media_player', {})
advertise_addr = config.get(CONF_ADVERTISE_ADDR)
if advertise_addr:
pysonos.config.EVENT_ADVERTISE_IP = advertise_addr
def _discovery(now=None):
"""Discover players from network or configuration."""
hosts = config.get(CONF_HOSTS)
def _discovered_player(soco):
"""Handle a (re)discovered player."""
try:
# Make sure that the player is available
_ = soco.volume
entity = _get_entity_from_soco_uid(hass, soco.uid)
if not entity:
hass.add_job(async_add_entities, [SonosEntity(soco)])
else:
entity.seen()
except SoCoException:
pass
if hosts:
for host in hosts:
try:
player = pysonos.SoCo(socket.gethostbyname(host))
if player.is_visible:
_discovered_player(player)
except (OSError, SoCoException):
if now is None:
_LOGGER.warning("Failed to initialize '%s'", host)
else:
pysonos.discover_thread(
_discovered_player,
interface_addr=config.get(CONF_INTERFACE_ADDR))
for entity in hass.data[DATA_SONOS].entities:
entity.check_unseen()
hass.helpers.event.call_later(DISCOVERY_INTERVAL, _discovery)
hass.async_add_executor_job(_discovery)
async def async_service_handle(service, data):
"""Handle dispatched services."""
entity_ids = data.get('entity_id')
entities = hass.data[DATA_SONOS].entities
if entity_ids and entity_ids != ENTITY_MATCH_ALL:
entities = [e for e in entities if e.entity_id in entity_ids]
if service == SERVICE_JOIN:
master = [e for e in hass.data[DATA_SONOS].entities
if e.entity_id == data[ATTR_MASTER]]
if master:
await SonosEntity.join_multi(hass, master[0], entities)
elif service == SERVICE_UNJOIN:
await SonosEntity.unjoin_multi(hass, entities)
elif service == SERVICE_SNAPSHOT:
await SonosEntity.snapshot_multi(
hass, entities, data[ATTR_WITH_GROUP])
elif service == SERVICE_RESTORE:
await SonosEntity.restore_multi(
hass, entities, data[ATTR_WITH_GROUP])
else:
for entity in entities:
if service == SERVICE_SET_TIMER:
call = entity.set_sleep_timer
elif service == SERVICE_CLEAR_TIMER:
call = entity.clear_sleep_timer
elif service == SERVICE_UPDATE_ALARM:
call = entity.set_alarm
elif service == SERVICE_SET_OPTION:
call = entity.set_option
hass.async_add_executor_job(call, data)
# We are ready for the next service call
hass.data[DATA_SERVICE_EVENT].set()
async_dispatcher_connect(hass, SONOS_DOMAIN, async_service_handle)
class _ProcessSonosEventQueue:
"""Queue like object for dispatching sonos events."""
def __init__(self, handler):
"""Initialize Sonos event queue."""
self._handler = handler
def put(self, item, block=True, timeout=None):
"""Process event."""
self._handler(item)
def _get_entity_from_soco_uid(hass, uid):
"""Return SonosEntity from SoCo uid."""
for entity in hass.data[DATA_SONOS].entities:
if uid == entity.unique_id:
return entity
return None
def soco_error(errorcodes=None):
"""Filter out specified UPnP errors from logs and avoid exceptions."""
def decorator(funct):
"""Decorate functions."""
@ft.wraps(funct)
def wrapper(*args, **kwargs):
"""Wrap for all soco UPnP exception."""
try:
return funct(*args, **kwargs)
except SoCoUPnPException as err:
if errorcodes and err.error_code in errorcodes:
pass
else:
_LOGGER.error("Error on %s with %s", funct.__name__, err)
except SoCoException as err:
_LOGGER.error("Error on %s with %s", funct.__name__, err)
return wrapper
return decorator
def soco_coordinator(funct):
"""Call function on coordinator."""
@ft.wraps(funct)
def wrapper(entity, *args, **kwargs):
"""Wrap for call to coordinator."""
if entity.is_coordinator:
return funct(entity, *args, **kwargs)
return funct(entity.coordinator, *args, **kwargs)
return wrapper
def _timespan_secs(timespan):
"""Parse a time-span into number of seconds."""
if timespan in ('', 'NOT_IMPLEMENTED', None):
return None
return sum(60 ** x[0] * int(x[1]) for x in enumerate(
reversed(timespan.split(':'))))
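# Editor's note (illustrative only): '0:03:21' splits into ['0', '03', '21'];
# reversed and enumerated this sums to 60**0*21 + 60**1*3 + 60**2*0 = 201 seconds.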
def _is_radio_uri(uri):
"""Return whether the URI is a radio stream."""
radio_schemes = (
'x-rincon-mp3radio:', 'x-sonosapi-stream:', 'x-sonosapi-radio:',
'x-sonosapi-hls:', 'hls-radio:')
return uri.startswith(radio_schemes)
class SonosEntity(MediaPlayerDevice):
"""Representation of a Sonos entity."""
def __init__(self, player):
"""Initialize the Sonos entity."""
self._seen = None
self._subscriptions = []
self._receives_events = False
self._volume_increment = 2
self._unique_id = player.uid
self._player = player
self._model = None
self._player_volume = None
self._player_muted = None
self._shuffle = None
self._name = None
self._coordinator = None
self._sonos_group = [self]
self._status = None
self._media_duration = None
self._media_position = None
self._media_position_updated_at = None
self._media_image_url = None
self._media_artist = None
self._media_album_name = None
self._media_title = None
self._night_sound = None
self._speech_enhance = None
self._source_name = None
self._available = True
self._favorites = None
self._soco_snapshot = None
self._snapshot_group = None
self._set_basic_information()
self.seen()
async def async_added_to_hass(self):
"""Subscribe sonos events."""
self.hass.data[DATA_SONOS].entities.append(self)
self.hass.async_add_executor_job(self._subscribe_to_player_events)
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
def __hash__(self):
"""Return a hash of self."""
return hash(self.unique_id)
@property
def name(self):
"""Return the name of the entity."""
return self._name
@property
def device_info(self):
"""Return information about the device."""
return {
'identifiers': {
(SONOS_DOMAIN, self._unique_id)
},
'name': self._name,
'model': self._model.replace("Sonos ", ""),
'manufacturer': 'Sonos',
}
@property
@soco_coordinator
def state(self):
"""Return the state of the entity."""
if self._status in ('PAUSED_PLAYBACK', 'STOPPED'):
return STATE_PAUSED
if self._status in ('PLAYING', 'TRANSITIONING'):
return STATE_PLAYING
return STATE_IDLE
@property
def is_coordinator(self):
"""Return true if player is a coordinator."""
return self._coordinator is None
@property
def soco(self):
"""Return soco object."""
return self._player
@property
def coordinator(self):
"""Return coordinator of this player."""
return self._coordinator
def seen(self):
"""Record that this player was seen right now."""
self._seen = time.monotonic()
if self._available:
return
self._available = True
self._set_basic_information()
self._subscribe_to_player_events()
self.schedule_update_ha_state()
def check_unseen(self):
"""Make this player unavailable if it was not seen recently."""
if not self._available:
return
if self._seen < time.monotonic() - 2*DISCOVERY_INTERVAL:
self._available = False
def _unsub(subscriptions):
for subscription in subscriptions:
subscription.unsubscribe()
self.hass.add_job(_unsub, self._subscriptions)
self._subscriptions = []
self.schedule_update_ha_state()
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
def _set_basic_information(self):
"""Set initial entity information."""
speaker_info = self.soco.get_speaker_info(True)
self._name = speaker_info['zone_name']
self._model = speaker_info['model_name']
self._shuffle = self.soco.shuffle
self.update_volume()
self._set_favorites()
def _set_favorites(self):
"""Set available favorites."""
favorites = self.soco.music_library.get_sonos_favorites()
# Exclude favorites that are non-playable due to no linked resources
self._favorites = [f for f in favorites if f.reference.resources]
def _radio_artwork(self, url):
"""Return the private URL with artwork for a radio stream."""
if url not in ('', 'NOT_IMPLEMENTED', None):
if url.find('tts_proxy') > 0:
# If the content is a tts don't try to fetch an image from it.
return None
url = 'http://{host}:{port}/getaa?s=1&u={uri}'.format(
host=self.soco.ip_address,
port=1400,
uri=urllib.parse.quote(url, safe='')
)
return url
def _subscribe_to_player_events(self):
"""Add event subscriptions."""
self._receives_events = False
# New player available, build the current group topology
for entity in self.hass.data[DATA_SONOS].entities:
entity.update_groups()
player = self.soco
def subscribe(service, action):
"""Add a subscription to a pysonos service."""
queue = _ProcessSonosEventQueue(action)
sub = service.subscribe(auto_renew=True, event_queue=queue)
self._subscriptions.append(sub)
subscribe(player.avTransport, self.update_media)
subscribe(player.renderingControl, self.update_volume)
subscribe(player.zoneGroupTopology, self.update_groups)
subscribe(player.contentDirectory, self.update_content)
def update(self):
"""Retrieve latest state."""
if self._available and not self._receives_events:
try:
self.update_groups()
self.update_volume()
if self.is_coordinator:
self.update_media()
except SoCoException:
pass
def update_media(self, event=None):
"""Update information about currently playing media."""
transport_info = self.soco.get_current_transport_info()
new_status = transport_info.get('current_transport_state')
# Ignore transitions, we should get the target state soon
if new_status == 'TRANSITIONING':
return
self._shuffle = self.soco.shuffle
update_position = (new_status != self._status)
self._status = new_status
if self.soco.is_playing_tv:
self.update_media_linein(SOURCE_TV)
elif self.soco.is_playing_line_in:
self.update_media_linein(SOURCE_LINEIN)
else:
track_info = self.soco.get_current_track_info()
if _is_radio_uri(track_info['uri']):
variables = event and event.variables
self.update_media_radio(variables, track_info)
else:
self.update_media_music(update_position, track_info)
self.schedule_update_ha_state()
# Also update slaves
for entity in self.hass.data[DATA_SONOS].entities:
coordinator = entity.coordinator
if coordinator and coordinator.unique_id == self.unique_id:
entity.schedule_update_ha_state()
def update_media_linein(self, source):
"""Update state when playing from line-in/tv."""
self._media_duration = None
self._media_position = None
self._media_position_updated_at = None
self._media_image_url = None
self._media_artist = source
self._media_album_name = None
self._media_title = None
self._source_name = source
def update_media_radio(self, variables, track_info):
"""Update state when streaming radio."""
self._media_duration = None
self._media_position = None
self._media_position_updated_at = None
media_info = self.soco.avTransport.GetMediaInfo([('InstanceID', 0)])
self._media_image_url = self._radio_artwork(media_info['CurrentURI'])
self._media_artist = track_info.get('artist')
self._media_album_name = None
self._media_title = track_info.get('title')
if self._media_artist and self._media_title:
            # artist and album name are in the data; concatenate
            # them to display as the artist.
# "Information" field in the sonos pc app
self._media_artist = '{artist} - {title}'.format(
artist=self._media_artist,
title=self._media_title
)
elif variables:
# "On Now" field in the sonos pc app
current_track_metadata = variables.get('current_track_meta_data')
if current_track_metadata:
self._media_artist = \
current_track_metadata.radio_show.split(',')[0]
# For radio streams we set the radio station name as the title.
current_uri_metadata = media_info["CurrentURIMetaData"]
if current_uri_metadata not in ('', 'NOT_IMPLEMENTED', None):
# currently soco does not have an API for this
current_uri_metadata = pysonos.xml.XML.fromstring(
pysonos.utils.really_utf8(current_uri_metadata))
md_title = current_uri_metadata.findtext(
'.//{http://purl.org/dc/elements/1.1/}title')
if md_title not in ('', 'NOT_IMPLEMENTED', None):
self._media_title = md_title
if self._media_artist and self._media_title:
# some radio stations put their name into the artist
# name, e.g.:
# media_title = "Station"
# media_artist = "Station - Artist - Title"
# detect this case and trim from the front of
# media_artist for cosmetics
trim = '{title} - '.format(title=self._media_title)
chars = min(len(self._media_artist), len(trim))
if self._media_artist[:chars].upper() == trim[:chars].upper():
self._media_artist = self._media_artist[chars:]
# Check if currently playing radio station is in favorites
self._source_name = None
for fav in self._favorites:
if fav.reference.get_uri() == media_info['CurrentURI']:
self._source_name = fav.title
def update_media_music(self, update_media_position, track_info):
"""Update state when playing music tracks."""
self._media_duration = _timespan_secs(track_info.get('duration'))
position_info = self.soco.avTransport.GetPositionInfo(
[('InstanceID', 0),
('Channel', 'Master')]
)
rel_time = _timespan_secs(position_info.get("RelTime"))
# player no longer reports position?
update_media_position |= rel_time is None and \
self._media_position is not None
# player started reporting position?
update_media_position |= rel_time is not None and \
self._media_position is None
# position jumped?
if (self.state == STATE_PLAYING
and rel_time is not None
and self._media_position is not None):
time_diff = utcnow() - self._media_position_updated_at
time_diff = time_diff.total_seconds()
calculated_position = self._media_position + time_diff
update_media_position |= abs(calculated_position - rel_time) > 1.5
if update_media_position:
self._media_position = rel_time
self._media_position_updated_at = utcnow()
self._media_image_url = track_info.get('album_art')
self._media_artist = track_info.get('artist')
self._media_album_name = track_info.get('album')
self._media_title = track_info.get('title')
self._source_name = None
def update_volume(self, event=None):
"""Update information about currently volume settings."""
if event:
variables = event.variables
if 'volume' in variables:
self._player_volume = int(variables['volume']['Master'])
if 'mute' in variables:
self._player_muted = (variables['mute']['Master'] == '1')
if 'night_mode' in variables:
self._night_sound = (variables['night_mode'] == '1')
if 'dialog_level' in variables:
self._speech_enhance = (variables['dialog_level'] == '1')
self.schedule_update_ha_state()
else:
self._player_volume = self.soco.volume
self._player_muted = self.soco.mute
self._night_sound = self.soco.night_mode
self._speech_enhance = self.soco.dialog_mode
def update_groups(self, event=None):
"""Handle callback for topology change event."""
def _get_soco_group():
"""Ask SoCo cache for existing topology."""
coordinator_uid = self.unique_id
slave_uids = []
try:
if self.soco.group and self.soco.group.coordinator:
coordinator_uid = self.soco.group.coordinator.uid
slave_uids = [p.uid for p in self.soco.group.members
if p.uid != coordinator_uid]
except SoCoException:
pass
return [coordinator_uid] + slave_uids
async def _async_extract_group(event):
"""Extract group layout from a topology event."""
group = event and event.zone_player_uui_ds_in_group
if group:
return group.split(',')
return await self.hass.async_add_executor_job(_get_soco_group)
def _async_regroup(group):
"""Rebuild internal group layout."""
sonos_group = []
for uid in group:
entity = _get_entity_from_soco_uid(self.hass, uid)
if entity:
sonos_group.append(entity)
self._coordinator = None
self._sonos_group = sonos_group
self.async_schedule_update_ha_state()
for slave_uid in group[1:]:
slave = _get_entity_from_soco_uid(self.hass, slave_uid)
if slave:
# pylint: disable=protected-access
slave._coordinator = self
slave._sonos_group = sonos_group
slave.async_schedule_update_ha_state()
async def _async_handle_group_event(event):
"""Get async lock and handle event."""
async with self.hass.data[DATA_SONOS].topology_condition:
group = await _async_extract_group(event)
if self.unique_id == group[0]:
_async_regroup(group)
self.hass.data[DATA_SONOS].topology_condition.notify_all()
if event:
self._receives_events = True
if not hasattr(event, 'zone_player_uui_ds_in_group'):
return
self.hass.add_job(_async_handle_group_event(event))
def update_content(self, event=None):
"""Update information about available content."""
self._set_favorites()
self.schedule_update_ha_state()
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._player_volume / 100
@property
def is_volume_muted(self):
"""Return true if volume is muted."""
return self._player_muted
@property
@soco_coordinator
def shuffle(self):
"""Shuffling state."""
return self._shuffle
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
@soco_coordinator
def media_duration(self):
"""Duration of current playing media in seconds."""
return self._media_duration
@property
@soco_coordinator
def media_position(self):
"""Position of current playing media in seconds."""
return self._media_position
@property
@soco_coordinator
def media_position_updated_at(self):
"""When was the position of the current playing media valid."""
return self._media_position_updated_at
@property
@soco_coordinator
def media_image_url(self):
"""Image url of current playing media."""
return self._media_image_url or None
@property
@soco_coordinator
def media_artist(self):
"""Artist of current playing media, music track only."""
return self._media_artist
@property
@soco_coordinator
def media_album_name(self):
"""Album name of current playing media, music track only."""
return self._media_album_name
@property
@soco_coordinator
def media_title(self):
"""Title of current playing media."""
return self._media_title
@property
@soco_coordinator
def source(self):
"""Name of the current input source."""
return self._source_name
@property
@soco_coordinator
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_SONOS
@soco_error()
def volume_up(self):
"""Volume up media player."""
self._player.volume += self._volume_increment
@soco_error()
def volume_down(self):
"""Volume down media player."""
self._player.volume -= self._volume_increment
@soco_error()
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self.soco.volume = str(int(volume * 100))
@soco_error(UPNP_ERRORS_TO_IGNORE)
@soco_coordinator
def set_shuffle(self, shuffle):
"""Enable/Disable shuffle mode."""
self.soco.shuffle = shuffle
@soco_error()
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
self.soco.mute = mute
@soco_error()
@soco_coordinator
def select_source(self, source):
"""Select input source."""
if source == SOURCE_LINEIN:
self.soco.switch_to_line_in()
elif source == SOURCE_TV:
self.soco.switch_to_tv()
else:
fav = [fav for fav in self._favorites
if fav.title == source]
if len(fav) == 1:
src = fav.pop()
uri = src.reference.get_uri()
if _is_radio_uri(uri):
self.soco.play_uri(uri, title=source)
else:
self.soco.clear_queue()
self.soco.add_to_queue(src.reference)
self.soco.play_from_queue(0)
@property
@soco_coordinator
def source_list(self):
"""List of available input sources."""
sources = [fav.title for fav in self._favorites]
model = self._model.upper()
if 'PLAY:5' in model or 'CONNECT' in model:
sources += [SOURCE_LINEIN]
elif 'PLAYBAR' in model:
sources += [SOURCE_LINEIN, SOURCE_TV]
elif 'BEAM' in model:
sources += [SOURCE_TV]
return sources
@soco_error(UPNP_ERRORS_TO_IGNORE)
@soco_coordinator
def media_play(self):
"""Send play command."""
self.soco.play()
@soco_error(UPNP_ERRORS_TO_IGNORE)
@soco_coordinator
def media_stop(self):
"""Send stop command."""
self.soco.stop()
@soco_error(UPNP_ERRORS_TO_IGNORE)
@soco_coordinator
def media_pause(self):
"""Send pause command."""
self.soco.pause()
@soco_error(UPNP_ERRORS_TO_IGNORE)
@soco_coordinator
def media_next_track(self):
"""Send next track command."""
self.soco.next()
@soco_error(UPNP_ERRORS_TO_IGNORE)
@soco_coordinator
def media_previous_track(self):
"""Send next track command."""
self.soco.previous()
@soco_error(UPNP_ERRORS_TO_IGNORE)
@soco_coordinator
def media_seek(self, position):
"""Send seek command."""
self.soco.seek(str(datetime.timedelta(seconds=int(position))))
@soco_error()
@soco_coordinator
def clear_playlist(self):
"""Clear players playlist."""
self.soco.clear_queue()
@soco_error()
@soco_coordinator
def play_media(self, media_type, media_id, **kwargs):
"""
Send the play_media command to the media player.
If ATTR_MEDIA_ENQUEUE is True, add `media_id` to the queue.
"""
if kwargs.get(ATTR_MEDIA_ENQUEUE):
try:
self.soco.add_uri_to_queue(media_id)
except SoCoUPnPException:
_LOGGER.error('Error parsing media uri "%s", '
"please check it's a valid media resource "
'supported by Sonos', media_id)
else:
self.soco.play_uri(media_id)
@soco_error()
def join(self, slaves):
"""Form a group with other players."""
if self._coordinator:
self.unjoin()
group = [self]
else:
group = self._sonos_group.copy()
for slave in slaves:
if slave.unique_id != self.unique_id:
slave.soco.join(self.soco)
# pylint: disable=protected-access
slave._coordinator = self
if slave not in group:
group.append(slave)
return group
@staticmethod
async def join_multi(hass, master, entities):
"""Form a group with other players."""
async with hass.data[DATA_SONOS].topology_condition:
group = await hass.async_add_executor_job(master.join, entities)
await SonosEntity.wait_for_groups(hass, [group])
@soco_error()
def unjoin(self):
"""Unjoin the player from a group."""
self.soco.unjoin()
self._coordinator = None
@staticmethod
async def unjoin_multi(hass, entities):
"""Unjoin several players from their group."""
def _unjoin_all(entities):
"""Sync helper."""
# Unjoin slaves first to prevent inheritance of queues
coordinators = [e for e in entities if e.is_coordinator]
slaves = [e for e in entities if not e.is_coordinator]
for entity in slaves + coordinators:
entity.unjoin()
async with hass.data[DATA_SONOS].topology_condition:
await hass.async_add_executor_job(_unjoin_all, entities)
await SonosEntity.wait_for_groups(hass, [[e] for e in entities])
@soco_error()
def snapshot(self, with_group):
"""Snapshot the state of a player."""
self._soco_snapshot = pysonos.snapshot.Snapshot(self.soco)
self._soco_snapshot.snapshot()
if with_group:
self._snapshot_group = self._sonos_group.copy()
else:
self._snapshot_group = None
@staticmethod
async def snapshot_multi(hass, entities, with_group):
"""Snapshot all the entities and optionally their groups."""
# pylint: disable=protected-access
def _snapshot_all(entities):
"""Sync helper."""
for entity in entities:
entity.snapshot(with_group)
# Find all affected players
entities = set(entities)
if with_group:
for entity in list(entities):
entities.update(entity._sonos_group)
async with hass.data[DATA_SONOS].topology_condition:
await hass.async_add_executor_job(_snapshot_all, entities)
@soco_error()
def restore(self):
"""Restore a snapshotted state to a player."""
try:
# pylint: disable=protected-access
self._soco_snapshot.restore()
except (TypeError, AttributeError, SoCoException) as ex:
# Can happen if restoring a coordinator onto a current slave
_LOGGER.warning("Error on restore %s: %s", self.entity_id, ex)
self._soco_snapshot = None
self._snapshot_group = None
@staticmethod
async def restore_multi(hass, entities, with_group):
"""Restore snapshots for all the entities."""
# pylint: disable=protected-access
def _restore_groups(entities, with_group):
"""Pause all current coordinators and restore groups."""
for entity in (e for e in entities if e.is_coordinator):
if entity.state == STATE_PLAYING:
entity.media_pause()
groups = []
if with_group:
# Unjoin slaves first to prevent inheritance of queues
for entity in [e for e in entities if not e.is_coordinator]:
if entity._snapshot_group != entity._sonos_group:
entity.unjoin()
# Bring back the original group topology
for entity in (e for e in entities if e._snapshot_group):
if entity._snapshot_group[0] == entity:
entity.join(entity._snapshot_group)
groups.append(entity._snapshot_group.copy())
return groups
def _restore_players(entities):
"""Restore state of all players."""
for entity in (e for e in entities if not e.is_coordinator):
entity.restore()
for entity in (e for e in entities if e.is_coordinator):
entity.restore()
# Find all affected players
entities = set(e for e in entities if e._soco_snapshot)
if with_group:
for entity in [e for e in entities if e._snapshot_group]:
entities.update(entity._snapshot_group)
async with hass.data[DATA_SONOS].topology_condition:
groups = await hass.async_add_executor_job(
_restore_groups, entities, with_group)
await SonosEntity.wait_for_groups(hass, groups)
await hass.async_add_executor_job(_restore_players, entities)
@staticmethod
async def wait_for_groups(hass, groups):
"""Wait until all groups are present, or timeout."""
# pylint: disable=protected-access
def _test_groups(groups):
"""Return whether all groups exist now."""
for group in groups:
coordinator = group[0]
# Test that coordinator is coordinating
current_group = coordinator._sonos_group
if coordinator != current_group[0]:
return False
# Test that slaves match
if set(group[1:]) != set(current_group[1:]):
return False
return True
try:
with async_timeout.timeout(5):
while not _test_groups(groups):
await hass.data[DATA_SONOS].topology_condition.wait()
except asyncio.TimeoutError:
_LOGGER.warning("Timeout waiting for target groups %s", groups)
for entity in hass.data[DATA_SONOS].entities:
entity.soco._zgs_cache.clear()
@soco_error()
@soco_coordinator
def set_sleep_timer(self, data):
"""Set the timer on the player."""
self.soco.set_sleep_timer(data[ATTR_SLEEP_TIME])
@soco_error()
@soco_coordinator
def clear_sleep_timer(self, data):
"""Clear the timer on the player."""
self.soco.set_sleep_timer(None)
@soco_error()
@soco_coordinator
def set_alarm(self, data):
"""Set the alarm clock on the player."""
from pysonos import alarms
alarm = None
for one_alarm in alarms.get_alarms(self.soco):
# pylint: disable=protected-access
if one_alarm._alarm_id == str(data[ATTR_ALARM_ID]):
alarm = one_alarm
if alarm is None:
_LOGGER.warning("did not find alarm with id %s",
data[ATTR_ALARM_ID])
return
if ATTR_TIME in data:
alarm.start_time = data[ATTR_TIME]
if ATTR_VOLUME in data:
alarm.volume = int(data[ATTR_VOLUME] * 100)
if ATTR_ENABLED in data:
alarm.enabled = data[ATTR_ENABLED]
if ATTR_INCLUDE_LINKED_ZONES in data:
alarm.include_linked_zones = data[ATTR_INCLUDE_LINKED_ZONES]
alarm.save()
@soco_error()
def set_option(self, data):
"""Modify playback options."""
if ATTR_NIGHT_SOUND in data and self._night_sound is not None:
self.soco.night_mode = data[ATTR_NIGHT_SOUND]
if ATTR_SPEECH_ENHANCE in data and self._speech_enhance is not None:
self.soco.dialog_mode = data[ATTR_SPEECH_ENHANCE]
@property
def device_state_attributes(self):
"""Return entity specific state attributes."""
attributes = {
ATTR_SONOS_GROUP: [e.entity_id for e in self._sonos_group],
}
if self._night_sound is not None:
attributes[ATTR_NIGHT_SOUND] = self._night_sound
if self._speech_enhance is not None:
attributes[ATTR_SPEECH_ENHANCE] = self._speech_enhance
return attributes
|
aequitas/home-assistant
|
homeassistant/components/sonos/media_player.py
|
Python
|
apache-2.0
| 37,431
| 0
|
"""
The latest version of this package is available at:
<http://github.com/jantman/webhook2lambda2sqs>
################################################################################
Copyright 2016 Jason Antman <jason@jasonantman.com> <http://www.jasonantman.com>
This file is part of webhook2lambda2sqs, also known as webhook2lambda2sqs.
webhook2lambda2sqs is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
webhook2lambda2sqs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with webhook2lambda2sqs. If not, see <http://www.gnu.org/licenses/>.
The Copyright and Authors attributions contained herein may not be removed or
otherwise altered, except to add the Author attribution of a contributor to
this work. (Additional Terms pursuant to Section 7b of the AGPL v3)
################################################################################
While not legally required, I sincerely request that anyone who finds
bugs please submit them at <https://github.com/jantman/webhook2lambda2sqs> or
to me via email, and that you send any contributions or improvements
either as a pull request on GitHub, or to me via email.
################################################################################
AUTHORS:
Jason Antman <jason@jasonantman.com> <http://www.jasonantman.com>
################################################################################
"""
VERSION = '0.2.0'
PROJECT_URL = 'https://github.com/jantman/webhook2lambda2sqs'
|
jantman/webhook2lambda2sqs
|
webhook2lambda2sqs/version.py
|
Python
|
agpl-3.0
| 1,938
| 0.000516
|
import os
CACHE_PORT = 8080
HTTP_PORT = 8000
HTTPS_PORT = 4430
DEFAULT_MAX_AGE = 3600
DEFAULT_LOG_PATH = os.path.dirname(__file__)
DEFAULT_LOG_FILE = os.path.join(DEFAULT_LOG_PATH, 'default-hendrix.log')
|
JamesTFarrington/hendrix
|
hendrix/defaults.py
|
Python
|
mit
| 209
| 0
|
# SET THE FOLLOWING EITHER BEFORE RUNNING THIS FILE OR BELOW, BEFORE INITIALIZING
# PRAW!
# set variables for interacting with reddit.
# my_user_agent = ""
# my_username = ""
# my_password = ""
# reddit_post_id = -1
# import praw - install: pip install praw
# Praw doc: https://praw.readthedocs.org/en/latest/index.html
import praw
# python base imports
import datetime
import pprint
# import python_utilities
from python_utilities.logging.summary_helper import SummaryHelper
# declare variables.
my_summary_helper = None
r = None
post = None
comments = None
flat_comments = None
test_comment = None
comment_prop_map = None
summary_string = ""
# set variables for interacting with reddit.
my_user_agent = "<user_agent>"
my_username = "<reddit_username>"
my_password = "<reddit_password>"
#reddit_post_id = "1cp0i3"
reddit_post_id = "1bvkol"
# init summary helper.
my_summary_helper = SummaryHelper()
print( "Starting PRAW test at " + str( start_dt ) )
# set user agent.
r = praw.Reddit( user_agent = my_user_agent )
# got login set?
if ( ( ( my_username ) and ( my_username != "" ) ) and ( ( my_password ) and ( my_password != "" ) ) ):
# yes. Login.
r.login( my_username, my_password )
print( "==> Logged in." )
#-- END check to see if we log in. --#
print( "==> Created reddit instance." )
# retrieve post
# - post with lots of comments - 1bvkol has 22014
#reddit_post_id = "1bvkol"
post = r.get_submission( submission_id = reddit_post_id, comment_limit = 1500, comment_sort = "old" )
print( "Retrieved post " + str( reddit_post_id ) )
# output number of comments based on post
print( "==> post.permalink: " + post.permalink )
print( "==> post.num_comments: " + str( post.num_comments ) )
# use the replace_more_comments() method to pull in as many comments as possible.
post.replace_more_comments( limit = None, threshold = 0 )
print( "==> After replace_more_comments()" )
# get the comments
comments = post.comments
# print out number of comments
print( "==> len( comments ): " + str( len( comments ) ) ) # 3,915 and counting
# these are objects where parent comments reference children. flatten...
flat_comments = praw.helpers.flatten_tree( post.comments )
# how many now?
print( "==> after flatten_tree(), comment count: " + str ( len( flat_comments ) ) ) # 13364 - closer to 22000, but still not all of them.
# get a comment
test_comment = flat_comments[ 0 ]
print( "Looking at comment 0:")
# what is in it?
print( "==> str( comment ): " + str( test_comment ) ) # outputs the text of comment, nothing more.
# reddit ID of comment:
print( "==> comment id: " + str( test_comment.id ) )
# body of comment
print( "==> comment body: " + test_comment.body )
# to get map of property names to values in a praw object:
comment_prop_map = vars( test_comment )
# pretty-print it with pprint library.
pprint.pprint( comment_prop_map )
'''
Example:
{'_info_url': 'http://www.reddit.com/api/info/',
'_replies': [<praw.objects.MoreComments object at 0x4867550>],
'_submission': <praw.objects.Submission object at 0x4867790>,
'_underscore_names': ['replies'],
'approved_by': None,
'author': Redditor(user_name='worldclasssteez'),
'author_flair_css_class': None,
'author_flair_text': None,
'banned_by': None,
'body': u'Using the "J" and "K" keys on VLC player to sync up the audio. ',
'body_html': u'<div class="md"><p>Using the &quot;J&quot; and &quot;K&quot; keys on VLC player to sync up the audio. </p>\n</div>',
'created': 1365399487.0,
'created_utc': 1365395887.0,
'distinguished': None,
'downs': 44,
'edited': False,
'gilded': 0,
'has_fetched': True,
'id': u'c9ap3fp',
'json_dict': None,
'likes': None,
'link_id': u't3_1bvkol',
'name': u't1_c9ap3fp',
'num_reports': None,
'parent_id': u't3_1bvkol',
'reddit_session': <praw.Reddit object at 0x48539d0>,
'saved': False,
'score_hidden': False,
'subreddit': Subreddit(display_name='AskReddit'),
'subreddit_id': u't5_2qh1i',
'ups': 201}
'''
# each name in that map can be invoked as a variable on the object itself.
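# For example (a brief illustration using names from the property map above):
print( "==> comment ups: " + str( test_comment.ups ) )
print( "==> comment parent_id: " + str( test_comment.parent_id ) )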
# test summary counter
my_summary_helper.set_prop_value( "comment_count", 0 )
# look at utc date order of comments:
# for comment in flat_comments[ 0:15 ]:
for index in range( 0, len( flat_comments ) ):
# get vars
comment = flat_comments[ index ]
comment_prop_map = vars( comment )
created_from_map = comment_prop_map[ 'created_utc' ]
created_from_obj = comment.created_utc
created_dt = datetime.datetime.fromtimestamp( created_from_map )
comment_id = comment.name
print( "==> " + str( index ) + " ( " + comment_id + " ) - Created UTC: " + str( created_from_map ) + " (map); " + str( created_from_obj ) + " (obj); " + str( created_dt ) )
# increment comment count
my_summary_helper.increment_prop_value( "comment_count" )
#-- END loop over comments --#
print( "==> Created: " + str( created_from_map ) + " (map); " + str( created_from_obj ) + " (obj); " + str( created_dt ) )
summary_string = "\nPRAW testing complete!\n"
# generate summary string
# set stop time.
my_summary_helper.set_stop_time()
# generate summary string.
summary_string += my_summary_helper.create_summary_string( item_prefix_IN = "==> " )
print( summary_string )
| jonathanmorgan/reddit_collect | examples/praw_testing.py | Python | gpl-3.0 | 5,265 | 0.02754 |
"""
Fiber-based fixed point location in the Lorenz system
f(v)[0] = s*(v[1]-v[0])
f(v)[1] = r*v[0] - v[1] - v[0]*v[2]
f(v)[2] = v[0]*v[1] - b*v[2]
Reference:
http://www.emba.uvm.edu/~jxyang/teaching/Math266notes13.pdf
https://en.wikipedia.org/wiki/Lorenz_system
"""
import numpy as np
import matplotlib.pyplot as pt
import scipy.integrate as si
import dfibers.traversal as tv
import dfibers.numerical_utilities as nu
import dfibers.fixed_points as fx
import dfibers.solvers as sv
from mpl_toolkits.mplot3d import Axes3D
N = 3
s, b, r = 10, 8./3., 28
def f(v):
return np.array([
s*(v[1,:]-v[0,:]),
r*v[0,:] - v[1,:] - v[0,:]*v[2,:],
v[0,:]*v[1,:] - b*v[2,:],
])
def ef(v):
return 0.001*np.ones((N,1))
def Df(v):
Dfv = np.empty((v.shape[1],3,3))
Dfv[:,0,0], Dfv[:,0,1], Dfv[:,0,2] = -s, s, 0
Dfv[:,1,0], Dfv[:,1,1], Dfv[:,1,2] = r - v[2], -1, -v[0]
Dfv[:,2,0], Dfv[:,2,1], Dfv[:,2,2] = v[1], v[0], -b
return Dfv
if __name__ == "__main__":
# Collect attractor points
t = np.arange(0,40,0.01)
v = np.ones((N,1))
A = si.odeint(lambda v, t: f(v.reshape((N,1))).flatten(), v.flatten(), t).T
# Set up fiber arguments
v = np.zeros((N,1))
# c = np.random.randn(N,1)
c = np.array([[0.83736021, -1.87848114, 0.43935044]]).T
fiber_kwargs = {
"f": f,
"ef": ef,
"Df": Df,
"compute_step_amount": lambda trace: (0.1, 0, False),
"v": v,
"c": c,
"terminate": lambda trace: (np.fabs(trace.x[:N,:]) > 50).any(),
"max_step_size": 1,
"max_traverse_steps": 2000,
"max_solve_iterations": 2**5,
}
print("using c:")
print(c.T)
# Visualize strange attractor
ax = pt.gca(projection="3d")
ax.plot(*A, color='gray', linestyle='-', alpha=0.5)
br1 = np.sqrt(b*(r-1))
U = np.array([[0, 0, 0],[br1,br1,r-1],[-br1,-br1,r-1]]).T
ax.scatter(*U, color='black')
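# Optional sanity check (a minimal sketch, not part of the original script):
# the non-origin fixed points satisfy x = y = +/-sqrt(b*(r-1)), z = r-1,
# so f evaluated at the columns of U should vanish up to round-off.
# print(np.fabs(f(U)).max())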
# Run and visualize fiber components, for each fxpt
xlims, ylims, zlims = [-20,20], [-30,30], [-20,60]
for fc in [0,2]:
# start from current fxpt
fiber_kwargs["v"] = U[:,[fc]]
# ax.text(U[0,fc],U[1,fc],U[2,fc], str(fc))
# Run in one direction
solution = sv.fiber_solver(**fiber_kwargs)
V1 = np.concatenate(solution["Fiber trace"].points, axis=1)[:N,:]
z = solution["Fiber trace"].z_initial
# Run in other direction (negate initial tangent)
fiber_kwargs["z"] = -z
solution = sv.fiber_solver(**fiber_kwargs)
V2 = np.concatenate(solution["Fiber trace"].points, axis=1)[:N,:]
# Join fiber segments, restrict to figure limits
V = np.concatenate((np.fliplr(V1), V2), axis=1)
V = V[:,::50]
for i, (lo, hi) in enumerate([xlims, ylims, zlims]):
V = V[:,(lo < V[i,:]) & (V[i,:] < hi)]
C = f(V)
# Visualize fiber
ax.plot(*V, color='black', linestyle='-')
ax.quiver(*np.concatenate((V,.1*C),axis=0),color='black')
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.view_init(elev=15,azim=145)
pt.tight_layout()
pt.show()
| garrettkatz/directional-fibers | dfibers/examples/lorenz.py | Python | mit | 3,181 | 0.019491 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2017 Juan Cabral
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
# DOCS
# =============================================================================
"""Synthetic light curve generator.
"""
# =============================================================================
# IMPORTS
# =============================================================================
import numpy as np
from .base import Data
# =============================================================================
# CONSTANTS
# =============================================================================
DS_NAME = "feets-synthetic"
DESCRIPTION = "Lightcurve created with random numbers"
BANDS = ("B", "V")
METADATA = None
DEFAULT_SIZE = 10000
# =============================================================================
# FUNCTIONS
# =============================================================================
def create_random(
magf,
magf_params,
errf,
errf_params,
timef=np.linspace,
timef_params=None,
size=DEFAULT_SIZE,
id=None,
ds_name=DS_NAME,
description=DESCRIPTION,
bands=BANDS,
metadata=METADATA,
):
"""Generate a data with any given random function.
Parameters
----------
magf : callable
Function to generate the magnitudes.
magf_params : dict-like
Parameters to feed the `magf` function.
errf : callable
Function to generate the measurement errors.
errf_params : dict-like
Parameters to feed the `errf` function.
timef : callable, (default=numpy.linspace)
Function to generate the times.
timef_params : dict-like or None, (default={"start": 0., "stop": 1.})
Parameters to feed the `timef` callable.
size : int (default=10000)
Number of observations of the light curve
id : object (default=None)
Id of the created data.
ds_name : str (default="feets-synthetic")
Name of the dataset
description : str (default="Lightcurve created with random numbers")
Description of the data
bands : tuple of strings (default=("B", "V"))
The bands to be created
metadata : dict-like or None (default=None)
The metadata of the created data
Returns
-------
data
A Data object with random light curves.
Examples
--------
.. code-block:: pycon
>>> from numpy import random
>>> create_random(
... magf=random.normal, magf_params={"loc": 0, "scale": 1},
... errf=random.normal, errf_params={"loc": 0, "scale": 0.008})
Data(id=None, ds_name='feets-synthetic', bands=('B', 'V'))
"""
timef_params = (
{"start": 0.0, "stop": 1.0}
if timef_params is None
else timef_params.copy()
)
timef_params.update(num=size)
magf_params = magf_params.copy()
magf_params.update(size=size)
errf_params = errf_params.copy()
errf_params.update(size=size)
data = {}
for band in bands:
data[band] = {
"time": timef(**timef_params),
"magnitude": magf(**magf_params),
"error": errf(**errf_params),
}
return Data(
id=id,
ds_name=ds_name,
description=description,
bands=bands,
metadata=metadata,
data=data,
)
def create_normal(
mu=0.0, sigma=1.0, mu_err=0.0, sigma_err=1.0, seed=None, **kwargs
):
"""Generate a data with magnitudes that follows a Gaussian
distribution. Also their errors are gaussian.
Parameters
----------
mu : float (default=0)
Mean of the gaussian distribution of magnitudes
sigma : float (default=1)
Standard deviation of the gaussian distribution of magnitudes
mu_err : float (default=0)
Mean of the gaussian distribution of magnitude errors
sigma_err : float (default=1)
Standard deviation of the gaussian distribution of magnitude errors
seed : {None, int, array_like}, optional
Random seed used to initialize the pseudo-random number generator.
Can be any integer between 0 and 2**32 - 1 inclusive, an
array (or other sequence) of such integers, or None (the default).
If seed is None, then RandomState will try to read data from
/dev/urandom (or the Windows analogue) if available or seed from
the clock otherwise.
kwargs : optional
extra arguments for create_random.
Returns
-------
data
A Data object with random light curves.
Examples
--------
.. code-block:: pycon
>>> ds = create_normal(0, 1, 0, .0008, seed=42)
>>> ds
Data(id=None, ds_name='feets-synthetic', bands=('B', 'V'))
>>> ds.data.B
LightCurve(time[10000], magnitude[10000], error[10000])
>>> ds.data.B.time
array([ 0.00000000e+00, 1.00010001e-04, 2.00020002e-04, ...,
9.99799980e-01, 9.99899990e-01, 1.00000000e+00])
"""
random = np.random.RandomState(seed)
return create_random(
magf=random.normal,
magf_params={"loc": mu, "scale": sigma},
errf=random.normal,
errf_params={"loc": mu_err, "scale": sigma_err},
**kwargs,
)
def create_uniform(
low=0.0, high=1.0, mu_err=0.0, sigma_err=1.0, seed=None, **kwargs
):
"""Generate a data with magnitudes that follows a uniform
distribution; the error instead are gaussian.
Parameters
----------
low : float, optional
Lower boundary of the output interval. All values generated will be
greater than or equal to low. The default value is 0.
high : float, optional
Upper boundary of the output interval. All values generated will be
less than high. The default value is 1.0.
mu_err : float (default=0)
Mean of the gaussian distribution of magnitude errors
sigma_err : float (default=1)
Standard deviation of the gaussian distribution of magnitude errors
seed : {None, int, array_like}, optional
Random seed used to initialize the pseudo-random number generator.
Can be any integer between 0 and 2**32 - 1 inclusive, an
array (or other sequence) of such integers, or None (the default).
If seed is None, then RandomState will try to read data from
/dev/urandom (or the Windows analogue) if available or seed from
the clock otherwise.
kwargs : optional
extra arguments for create_random.
Returns
-------
data
A Data object with random light curves.
Examples
--------
.. code-block:: pycon
>>> ds = synthetic.create_uniform(1, 2, 0, .0008, 42)
>>> ds
Data(id=None, ds_name='feets-synthetic', bands=('B', 'V'))
>>> ds.data.B.magnitude
array([ 1.37454012, 1.95071431, 1.73199394, ..., 1.94670792,
1.39748799, 1.2171404 ])
"""
random = np.random.RandomState(seed)
return create_random(
magf=random.uniform,
magf_params={"low": low, "high": high},
errf=random.normal,
errf_params={"loc": mu_err, "scale": sigma_err},
**kwargs,
)
def create_periodic(mu_err=0.0, sigma_err=1.0, seed=None, **kwargs):
"""Generate a data with magnitudes with periodic variability
distribution; the error instead are gaussian.
Parameters
----------
mu_err : float (default=0)
Mean of the gaussian distribution of magnitude errors
sigma_err : float (default=1)
Standard deviation of the gaussian distribution of magnitude errors
seed : {None, int, array_like}, optional
Random seed used to initialize the pseudo-random number generator.
Can be any integer between 0 and 2**32 - 1 inclusive, an
array (or other sequence) of such integers, or None (the default).
If seed is None, then RandomState will try to read data from
/dev/urandom (or the Windows analogue) if available or seed from
the clock otherwise.
kwargs : optional
extra arguments for create_random.
Returns
-------
data
A Data object with random light curves.
Examples
--------
.. code-block:: pycon
>>> ds = synthetic.create_periodic(bands=["Ks"])
>>> ds
Data(id=None, ds_name='feets-synthetic', bands=('Ks',))
>>> ds.data.Ks.magnitude
array([ 0.95428053, 0.73022685, 0.03005121, ..., -0.26305297,
2.57880082, 1.03376863])
"""
random = np.random.RandomState(seed)
size = kwargs.get("size", DEFAULT_SIZE)
times, mags, errors = [], [], []
for b in kwargs.get("bands", BANDS):
time = 100 * random.rand(size)
error = random.normal(size=size, loc=mu_err, scale=sigma_err)
mag = np.sin(2 * np.pi * time) + error * random.randn(size)
times.append(time)
errors.append(error)
mags.append(mag)
times, mags, errors = iter(times), iter(mags), iter(errors)
return create_random(
magf=lambda **k: next(mags),
magf_params={},
errf=lambda **k: next(errors),
errf_params={},
timef=lambda **k: next(times),
timef_params={},
**kwargs,
)
| carpyncho/feets | feets/datasets/synthetic.py | Python | mit | 10,431 | 0 |
# Say hello to Django
| obiwanus/django-qurl | qurl/models.py | Python | mit | 22 | 0 |
# Python Code From Book
# This file consists of code snippets only
# It is not intended to be run as a script
raise SystemExit
####################################################################
# 3. Thinking in Binary
####################################################################
import magic
print magic.from_file("my_image.jpg")
# JPEG image data, Exif standard: [TIFF image data, big-endian,
# direntries=16, height=3264, bps=0, PhotometricIntepretation=RGB],
# baseline, precision 8, 2378x2379, frames 3
if magic.from_file("upload.jpg", mime=True) == "image/jpeg":
continue_uploading("upload.jpg")
else:
alert("Sorry! This file type is not allowed")
import imghdr
print imghdr.what("path/to/my/file.ext")
import binascii
def spoof_file(file, magic_number):
magic_number = binascii.unhexlify(magic_number)
with open(file, "r+b") as f:
old = f.read()
f.seek(0)
f.write(magic_number + old)
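# Hypothetical usage of the helper above (file name and magic number are
# illustrative; ffd8ffe0 is the start of a JPEG/JFIF header):
# spoof_file("suspicious.bin", "ffd8ffe0")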
def to_ascii_bytes(string):
return " ".join(format(ord(char), '08b') for char in string)
string = "my ascii string"
"".join(hex(ord(char))[2:] for char in string)
# '6d7920617363696920737472696e67'
hex_string = "6d7920617363696920737472696e67"
hex_string.decode("hex")
# 'my ascii string'
"".join(chr(int(hex_string[i:i+2], 16)) for i in range(0, len(hex_string), 2))
# 'my ascii string'
# adapted from https://code.activestate.com/recipes/142812-hex-dumper/
def hexdump(string, length=8):
result = []
digits = 4 if isinstance(string, unicode) else 2
for i in xrange(0, len(string), length):
s = string[i:i + length]
hexa = "".join("{:0{}X}".format(ord(x), digits) for x in s)
text = "".join(x if 0x20 <= ord(x) < 0x7F else '.' for x in s)
result.append("{:04X} {:{}} {}".format(i, hexa, length * (digits + 1), text))
return '\n'.join(result)
with open("/path/to/my_file.ext", "r") as f:
print hexdump(f.read())
import struct
num = 0x103e4
struct.pack("I", 0x103e4)
# '\xe4\x03\x01\x00'
string = '\xe4\x03\x01\x00'
struct.unpack("i", string)
# (66532,)
bytes = '\x01\xc2'
struct.pack("<h", struct.unpack(">h", bytes)[0])
# '\xc2\x01'
import base64
base64.b64encode('encodings are fun...')
# 'ZW5jb2RpbmdzIGFyZSBmdW4uLi4='
base64.b64decode(_)
# 'encodings are fun...'
string = "hello\x00"
binary_string = ' '.join('{:08b}'.format(ord(char)) for char in string)
" ".join(binary_string[i:i+6] for i in range(0, len(binary_string), 6))
# '011010 000110 010101 101100 011011 000110 111100 000000'
bin_string = '011010 000110 010101 101100 011011 000110 111100 000000'
[int(b, 2) for b in bin_string.split()]
# [26, 6, 21, 44, 27, 6, 60, 0]
u'◑ \u2020'.encode('utf8')
# '\xe2\x97\x91 \xe2\x80\xa0'
'\xe2\x97\x91 \xe2\x80\xa0'.decode('utf8')
# u'\u25d1 \u2020'
unicode('\xe2\x97\x91 \xe2\x80\xa0', encoding='utf8')
# u'\u25d1 \u2020'
utf8_string = 'Åêíòü'
utf8_string
# '\xc3\x85\xc3\xaa\xc3\xad\xc3\xb2\xc3\xbc'
unicode_string = utf8_string.decode('utf8')
unicode_string
# u'\xc5\xea\xed\xf2\xfc'
unicode_string.encode('mac roman')
# '\x81\x90\x92\x98\x9f'
'Åêíòü'.decode('utf8').encode('ascii')
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# UnicodeEncodeError: 'ascii' codec can't encode characters in position 0-4: ordinal not in range(128)
file = """潍楪慢敫椠桴慧扲敬整瑸琠慨⁴獩琠敨爠獥汵⁴景琠硥⁴敢湩敤潣敤獵湩湡甠楮瑮湥敤档
牡捡整湥潣楤杮楷桴挠浯汰瑥汥⁹湵敲慬整湯獥景整牦浯愠搠晩敦敲瑮眠楲楴杮猠獹整‧⠊慔敫
牦浯攠楷楫数楤牯⥧"""
print file.decode('utf8').encode('utf16')
# ??Mojibake is the garbled text that is the result of text being decoded using an
# unintended character encoding with completely unrelated ones, often from a
# different writing system.' (Taken from en.wikipedia.org)
import ftfy
ftfy.fix_text(u"“Mojibake“ can be fixed.")
# u'"Mojibake" can be fixed.'
bin(0b1010 & 0b1111110111)
# '0b10'
bin(0b1010 | 0b0110)
# '0b1110'
bin(0b10111 | 0b01000)
# '0b11111'
bin(0b100 ^ 0b110)
# '0b10'
bin(-0b1010 >> 0b10)
# '-0b11'
x = 0b1111
y = 0b1010
bin(int("{:b}{:b}".format(x, y), 2))
# '0b11111010'
bin(x << 4 | y)
# '0b11111010'
####################################################################
# 4. Cryptography
####################################################################
import random
import string
r = random.SystemRandom()
# Get a random integer between 0 and 20
r.randint(0, 20)
# 5
# Get a random number between 0 and 1
r.random()
# 0.8282475835972263
# Generate a random 40-bit number
r.getrandbits(40)
# 595477188771L
# Choose a random item from a string or list
chars = string.printable
r.choice(chars)
# 'e'
# Randomize the order of a sequence
seq = ['a', 'b', 'c', 'd', 'e']
r.shuffle(seq)
print seq
# ['c','d', 'a', 'e', 'b']
"ALLIGATOR".encode('rot13')
# 'NYYVTNGBE'
"NYYVTNGBE".encode('rot13')
# 'ALLIGATOR'
plaintext = "A secret-ish message!"
"".join(chr((ord(c) + 20) % 256) for c in plaintext)
# 'U4\x87yw\x86y\x88A}\x87|4\x81y\x87\x87u{y5'
ciphertext = 'U4\x87yw\x86y\x88A}\x87|4\x81y\x87\x87u{y5'
"".join(chr((ord(c) - 20) % 256) for c in ciphertext)
# 'A secret-ish message!'
plaintext = 0b110100001101001
one_time_pad = 0b110000011100001
bin(plaintext ^ one_time_pad)
# '0b100010001000'
decrypted = 0b100010001000 ^ one_time_pad
format(decrypted, 'x').decode('hex')
# 'hi'
import os
import binascii
# ASCII-encoded plaintext
plaintext = "this is a secret message"
plaintext_bits = int(binascii.hexlify(plaintext), 16)
print "plaintext (ascii):", plaintext
print "plaintext (hex):", plaintext_bits
# Generate the one-time pad
onetime_pad = int(binascii.hexlify(os.urandom(len(plaintext))), 16)
print "one-time pad: (hex):", onetime_pad
# Encrypt plaintext using XOR operation with one-time pad
ciphertext_bits = plaintext_bits ^ onetime_pad
print "encrypted text (hex):", ciphertext_bits
# Decrypt using XOR operation with one-time pad
decrypted_text = ciphertext_bits ^ onetime_pad
decrypted_text = binascii.unhexlify(hex(decrypted_text)[2:-1])
print "decrypted text (ascii):", decrypted_text
import random
import binascii
p1 = "this is the part where you run away"
p2 = "from bad cryptography practices."
# pad plaintexts with spaces to ensure equal length
p1 = p1.ljust(len(p2))
p2 = p2.ljust(len(p1))
p1 = int(binascii.hexlify(p1), 16)
p2 = int(binascii.hexlify(p2), 16)
# get random one-time pad
otp = random.SystemRandom().getrandbits(p1.bit_length())
# encrypt
c1 = p1 ^ otp
c2 = p2 ^ otp # otp reuse...not good!
print "c1 ^ c2 == p1 ^ p2 ?", c1 ^ c2 == p1 ^ p2
print "c1 ^ c2 =", hex(c1 ^ c2)
# the crib
crib = " the "
crib = int(binascii.hexlify(crib), 16)
xored = c1 ^ c2
print "crib =", hex(crib)
cbl = crib.bit_length()
xbl = xored.bit_length()
print
mask = (2**(cbl + 1) - 1)
fill = len(str(xbl / 8))
# crib dragging
for s in range(0, xbl - cbl + 8, 8):
xor = (xored ^ (crib << s)) & (mask << s)
out = binascii.unhexlify(hex(xor)[2:-1])
print "{:>{}} {}".format(s/8, fill, out)
from cryptography.fernet import Fernet
key = Fernet.generate_key()
f = Fernet(key)
ciphertext = f.encrypt("this is my plaintext")
decrypted = f.decrypt(ciphertext)
print decrypted
# this is my plaintext
import os
from cryptography.hazmat.primitives import padding
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
pt = "my plaintext"
backend = default_backend()
key = os.urandom(32)
iv = os.urandom(16)
padder = padding.PKCS7(128).padder()
pt = padder.update(pt) + padder.finalize()
cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend)
encryptor = cipher.encryptor()
ct = encryptor.update(pt) + encryptor.finalize()
decryptor = cipher.decryptor()
out = decryptor.update(ct) + decryptor.finalize()
unpadder = padding.PKCS7(128).unpadder()
out = unpadder.update(out) + unpadder.finalize()
print out
import hashlib
hashlib.md5("hash me please").hexdigest()
# '760d92b6a6f974ae11904cd0a6fc2e90'
hashlib.sha1("hash me please").hexdigest()
# '1a58c9b3d138a45519518ee42e634600d1b52153'
import os
from cryptography.hazmat.primitives.kdf.scrypt import Scrypt
from cryptography.hazmat.backends import default_backend
backend = default_backend()
salt = os.urandom(16)
kdf = Scrypt(salt=salt, length=64, n=2**14, r=8, p=1, backend=backend)
key = kdf.derive("your favorite password")
key
import hmac
import hashlib
secret_key = "my secret key"
ciphertext = "my ciphertext"
# generate HMAC
h = hmac.new(key=secret_key, msg=ciphertext, digestmod=hashlib.sha256)
print h.hexdigest()
# verify HMAC
hmac.compare_digest(h.hexdigest(), h.hexdigest())
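# A more realistic verification sketch (names are illustrative): the receiver
# recomputes the HMAC over the received ciphertext and compares it to the
# tag that was transmitted alongside it.
received_tag = h.hexdigest()
h_check = hmac.new(key=secret_key, msg=ciphertext, digestmod=hashlib.sha256)
hmac.compare_digest(h_check.hexdigest(), received_tag)
# True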
p = 9576890767
q = 1299827
n = p * q
print n
# 12448301194997309
e = 65537
phi = (p - 1) * (q - 1)
phi % e != 0
# True
import sympy
d = sympy.numbers.igcdex(e, phi)[0]
print d
# 1409376745910033
m = 12345
c = pow(m, e, n)
print c
# 3599057382134015
pow(c, d, n)
# 12345
m = 0
while pow(m, e, n) != c:
m += 1
print m
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
public_key = private_key.public_key()
private_pem = private_key.private_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.BestAvailableEncryption('your password here'))
public_pem = public_key.public_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo)
print public_pem
print private_pem
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
import base64
with open("path/to/public_key.pem", "rb") as key_file:
public_key = serialization.load_pem_public_key(key_file.read(),
backend=default_backend())
message = "your secret message"
ciphertext = public_key.encrypt(message,
padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None))
b64_ciphertext = base64.urlsafe_b64encode(ciphertext)
print b64_ciphertext
plaintext = private_key.decrypt(ciphertext,
padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None))
print plaintext
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
signer = private_key.signer(padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH), hashes.SHA256())
message = "A message of arbitrary length"
signer.update(message)
signature = signer.finalize()
public_key = private_key.public_key()
verifier = public_key.verifier(signature, padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH), hashes.SHA256())
verifier.update(message)
verifier.verify()
####################################################################
# 5. Networking
####################################################################
import requests
r = requests.get('https://www.google.com/imghp')
r.content[:200]
# View status code
r.status_code
# 200
# View response header fields
r.headers
# {'Alt-Svc': 'quic=":443"; ma=2592000; v="36,35,34"',
# 'Cache-Control': 'private, max-age=0',
# 'Content-Encoding': 'gzip',
# 'Content-Type': 'text/html; charset=ISO-8859-1',
# 'Expires': '-1',
# 'P3P': 'CP="This is not a P3P policy! See https://www.google.com/support/accounts/answer/151657?hl=en for more info."',
# 'Server': 'gws',
# path=/; domain=.google.com; HttpOnly',
# 'Transfer-Encoding': 'chunked',
# 'X-Frame-Options': 'SAMEORIGIN',
# 'X-XSS-Protection': '1; mode=block'}
# Get content length in bytes
len(r.content)
# 10971
# Encoding
r.apparent_encoding
# 'ISO-8859-2'
# Time elapsed during request
r.elapsed
# datetime.timedelta(0, 0, 454447)
r.request.headers
# {'Accept': '*/*',
# 'Accept-Encoding': 'gzip, deflate',
# 'Connection': 'keep-alive',
# 'User-Agent': 'python-requests/2.12.4'}
custom_headers = {"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36"}
r = requests.get("https://www.google.com/imghp", headers=custom_headers)
r.request.headers
# {'Accept': '*/*',
# 'Accept-Encoding': 'gzip, deflate',
# 'Connection': 'keep-alive',
# 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'}
import requests
import logging
import http.client as http_client
http_client.HTTPConnection.debuglevel = 1
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
r = requests.get('https://www.google.com/')
# send: 'GET / HTTP/1.1\r\nHost: www.google.com\r\nConnection: keep-alive\r\nAccept-Encoding: gzip, deflate\r\nAccept: */*\r\nUser-Agent: python-requests/2.12.4\r\n\r\n'
# reply: 'HTTP/1.1 200 OK\r\n'
# header: Expires: -1
# header: Cache-Control: private, max-age=0
# header: Content-Type: text/html; charset=ISO-8859-1
# header: P3P: CP="This is not a P3P policy! See https://www.google.com/support/accounts/answer/151657?hl=en for more info."
# header: Content-Encoding: gzip
# header: Server: gws
# header: X-XSS-Protection: 1; mode=block
# header: X-Frame-Options: SAMEORIGIN
import urlparse
simple_url = "http://www.example.com/path/to/my/page"
parsed = urlparse.urlparse(simple_url)
parsed.scheme
parsed.hostname
parsed.path
url_with_query = "http://www.example.com/?page=1&key=Anvn4mo24"
query = urlparse.urlparse(url_with_query).query
urlparse.parse_qs(query)
# {'key': ['Anvn4mo24'], 'page': ['1']}
import urllib
url = 'https://www.example.com/%5EA-url-with-%-and-%5E?page=page+with%20spaces'
urllib.unquote(url)
# 'https://www.example.com/^A-url-with-%-and-^?page=page+with spaces'
chars = '!@#$%^%$#)'
urllib.quote(chars)
# '%21%40%23%24%25%5E%25%24%23%29'
urllib.unquote_plus(url)
# 'https://www.example.com/^A-url-with-%-and-^?page=page with spaces'
urllib.quote_plus('one two')
'one+two'
import requests
from bs4 import BeautifulSoup
r = requests.get("http://www.google.com")
soup = BeautifulSoup(r.content, "lxml")
soup.find_all('p')
soup.find_all('a')
# [<a class="gb1" href="http://www.google.com/imghp?hl=en&tab=wi">Images</a>,
# <a class="gb1" href="http://maps.google.com/maps?hl=en&tab=wl">Maps</a>,
# <a class="gb1" href="https://play.google.com/?hl=en&tab=w8">Play</a>,
# <a class="gb1" href="http://www.youtube.com/?tab=w1">YouTube</a>,
# <a class="gb1" href="http://news.google.com/nwshp?hl=en&tab=wn">News</a>,
# …]
for link in soup.find_all('a'):
print link.text, link["href"]
# Images http://www.google.com/imghp?hl=en&tab=wi
# Maps http://maps.google.com/maps?hl=en&tab=wl
# Play https://play.google.com/?hl=en&tab=w8
# YouTube http://www.youtube.com/?tab=w1
import dryscrape
from bs4 import BeautifulSoup
session = dryscrape.Session()
session.visit("http://www.google.com")
r = session.body()
soup = BeautifulSoup(r, "lxml")
from selenium import webdriver
driver = webdriver.Chrome("/path/to/chromedriver")
driver.get("http://www.google.com")
html = driver.page_source
driver.save_screenshot("screenshot.png")
driver.quit()
import smtplib
server = smtplib.SMTP('localhost', port=1025)
server.set_debuglevel(True)
server.sendmail("me@localhost", "you@localhost", "This is an email message")
server.quit()
| 0ppen/introhacking | code_from_book.py | Python | mit | 16,192 | 0.000628 |
#! /usr/bin/env python3
import math, sys
import shtns
import numpy as np
class shtnsfiledata:
#
# Adapted from https://bitbucket.org/nschaeff/shtns/src/master/examples/shallow_water.py
#
def __init__(
self,
rsphere = 1.0
):
self.rsphere = rsphere
def setup(self, file_info, anti_aliasing=False):
import shtns
import numpy as np
if file_info['modes_m_max'] != file_info['modes_n_max']:
raise Exception("Only num_lon == num_lat supported")
ntrunc = file_info['modes_n_max']
self._shtns = shtns.sht(ntrunc, ntrunc, 1, shtns.sht_orthonormal+shtns.SHT_NO_CS_PHASE)
nlons = (ntrunc + 1) * 2
nlats = (ntrunc + 1)
if anti_aliasing:
if nlons & 1:
raise Exception("Only even numbers of longitudinal coordinates allowed for anti-aliasing")
if nlats & 1:
raise Exception("Only even numbers of latitudinal coordinates allowed for anti-aliasing")
print("Anti-aliasing:")
print(" + old lon/lat: ", nlons, nlats)
nlons += nlons//2
nlats += nlats//2
print(" + new lon/lat: ", nlons, nlats)
if file_info['grid_type'] == 'GAUSSIAN':
#self._shtns.set_grid(nlats,nlons,shtns.sht_gauss_fly|shtns.SHT_PHI_CONTIGUOUS, 1.e-10)
self._shtns.set_grid(nlats, nlons, shtns.sht_quick_init|shtns.SHT_PHI_CONTIGUOUS, 0)
elif file_info['grid_type'] == 'REGULAR':
#self._shtns.set_grid(nlats,nlons,shtns.sht_reg_dct|shtns.SHT_PHI_CONTIGUOUS, 1.e-10)
self._shtns.set_grid(nlats, nlons, shtns.sht_reg_dct|shtns.SHT_PHI_CONTIGUOUS, 0)
else:
raise Exception("Grid type '"+file_info['grid_type']+"' not supported!")
self.lats = np.arcsin(self._shtns.cos_theta)
self.lons = (2.*np.pi/nlons)*np.arange(nlons)
self.nlons = nlons
self.nlats = nlats
self.ntrunc = ntrunc
self.nlm = self._shtns.nlm
self.degree = self._shtns.l
self.lap = -self.degree*(self.degree+1.0).astype(np.complex)
self.invlap = np.zeros(self.lap.shape, self.lap.dtype)
self.invlap[1:] = 1./self.lap[1:]
self.lap = self.lap/self.rsphere**2
self.invlap = self.invlap*self.rsphere**2
def phys2spec(self, data):
return self._shtns.analys(data)
def spec2phys(self, dataspec):
return self._shtns.synth(dataspec)
def vrtdiv2uv(self, vrtspec, divspec):
return self._shtns.synth((self.invlap/self.rsphere)*vrtspec, (self.invlap/self.rsphere)*divspec)
def uv2vrtdiv(self,u,v):
vrtspec, divspec = self._shtns.analys(u, v)
return self.lap*self.rsphere*vrtspec, self.lap*self.rsphere*divspec
def getuv(self,divspec):
vrtspec = np.zeros(divspec.shape, dtype=np.complex)
u,v = self._shtns.synth(vrtspec,divspec)
return u, v
def rotateX90(self, i_field):
return self._shtns.Xrotate90(i_field)
def rotateY90(self, i_field):
return self._shtns.Yrotate90(i_field)
def rotateZ90(self, i_field, angle):
return self._shtns.Zrotate(i_field, angle)
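# Minimal usage sketch (hypothetical file_info values; illustrative only):
#
# sd = shtnsfiledata()
# sd.setup({'modes_m_max': 85, 'modes_n_max': 85, 'grid_type': 'GAUSSIAN'})
# spec = sd.phys2spec(phys_field)  # physical grid -> spectral coefficients
# phys = sd.spec2phys(spec)        # spectral coefficients -> physical grid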
| schreiberx/sweet | mule_local/python/mule_local/postprocessing/shtnsfiledata.py | Python | mit | 3,227 | 0.008057 |
# -*- coding: utf-8 -*-
import inspect
import random
import numpy as np
from django.contrib import messages
from django.views.generic import RedirectView
from django.contrib.contenttypes.models import ContentType
from django.http import Http404
from django.contrib.auth.mixins import UserPassesTestMixin
class RunActionView(UserPassesTestMixin, RedirectView):
"""
Runs common Actions for Systems and Techniques
"""
permanent = False
#: Available Actions
ACTIONS = {
"perform_inference": {
"type": "object",
"str": "PERFORMING INFERENCE",
"method": "perform_inference",
"kwargs": {
"recalculate": True
},
},
"reset_inference": {
"type": "object",
"str": "RESETING INFERENCE",
"method": "reset_inference",
"kwargs": {},
},
"reinitialize_rng": {
"type": "general",
"str": "REINITIALIZING RNG",
"method": "action_reinitialize_rng",
"kwargs": {},
}
}
def test_func(self):
return(self.request.user.is_superuser)
def action_reinitialize_rng(self):
"""
Reinitialize both generators
"""
random.seed()
np.random.seed()
def get_ct_object(self, content_type, object_id):
ct = ContentType.objects.get(model=content_type)
return(ct.model_class().objects.get(id=object_id))
def run_action(self, action, action_object=None):
try:
if action_object:
action_method = getattr(action_object, action['method'])
else:
action_method = getattr(self, action['method'])
action_method(**action['kwargs'])
messages.success(self.request,
"SUCCESS AT {}".format(action['str']))
except Exception as e:
msg = e.args[0]
frm = inspect.trace()[-1]
mod = inspect.getmodule(frm[0])
modname = mod.__name__ if mod else frm[1]
messages.error(self.request,
"ERROR WHILE {}: [{}] {}".format(
action['str'], modname, str(msg)))
def get_redirect_url(self, *args, **kwargs):
if kwargs['action'] not in self.ACTIONS:
raise Http404("Action not Found")
if self.ACTIONS[kwargs['action']]["type"] == 'object':
action_object = self.get_ct_object(kwargs['content_type'],
kwargs['object_id'])
else:
action_object = None
self.run_action(self.ACTIONS[kwargs['action']], action_object)
return(self.request.META.get('HTTP_REFERER', '/'))
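# Hypothetical URL wiring for reference (illustrative only; the project's real
# urlconf is not part of this file):
#
# path('run-action/<str:action>/<str:content_type>/<int:object_id>/',
#      RunActionView.as_view(), name='run-action'),
# path('run-action/<str:action>/', RunActionView.as_view(), name='run-general-action'),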
| math-a3k/django-ai | django_ai/base/views.py | Python | lgpl-3.0 | 2,787 | 0 |
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2014, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""A writer for chemical JSON (CJSON) files."""
try:
import openbabel as ob
has_openbabel = True
except ImportError:
has_openbabel = False
import os.path
import json
from . import filewriter
class CJSON(filewriter.Writer):
"""A writer for chemical JSON (CJSON) files."""
def __init__(self, ccdata, *args, **kwargs):
"""Initialize the chemical JSON writer object.
Inputs:
ccdata - An instance of ccData, parsed from a logfile.
"""
# Call the __init__ method of the superclass
super(CJSON, self).__init__(ccdata, *args, **kwargs)
self.generate_repr()
def generate_repr(self):
"""Generate the CJSON representation of the logfile data."""
cjson_dict = dict()
cjson_dict['chemical json'] = 0
if self.jobfilename is not None:
cjson_dict['name'] = os.path.splitext(self.jobfilename)[0]
# These are properties that can be collected using Open Babel.
if has_openbabel:
cjson_dict['smiles'] = self.pbmol.write('smiles')
cjson_dict['inchi'] = self.pbmol.write('inchi')
cjson_dict['inchikey'] = self.pbmol.write('inchikey')
cjson_dict['formula'] = self.pbmol.formula
cjson_dict['atoms'] = dict()
cjson_dict['atoms']['elements'] = dict()
cjson_dict['atoms']['elements']['number'] = self.ccdata.atomnos.tolist()
cjson_dict['atoms']['coords'] = dict()
cjson_dict['atoms']['coords']['3d'] = self.ccdata.atomcoords[-1].flatten().tolist()
cjson_dict['bonds'] = dict()
cjson_dict['bonds']['connections'] = dict()
cjson_dict['bonds']['connections']['index'] = []
if has_openbabel:
for bond in self.bond_connectivities:
cjson_dict['bonds']['connections']['index'].append(bond[0] + 1)
cjson_dict['bonds']['connections']['index'].append(bond[1] + 1)
cjson_dict['bonds']['order'] = [bond[2] for bond in self.bond_connectivities]
cjson_dict['properties'] = dict()
if has_openbabel:
cjson_dict['properties']['molecular mass'] = self.pbmol.molwt
cjson_dict['atomCount'] = len(self.ccdata.atomnos)
cjson_dict['heavyAtomCount'] = len([x for x in self.ccdata.atomnos if x > 1])
if has_openbabel:
cjson_dict['diagram'] = self.pbmol.write(format='svg')
# These are properties that can be collected using cclib.
# Do there need to be any unit conversions here?
homo_idx_alpha = int(self.ccdata.homos[0])
homo_idx_beta = int(self.ccdata.homos[-1])
energy_alpha_homo = self.ccdata.moenergies[0][homo_idx_alpha]
energy_alpha_lumo = self.ccdata.moenergies[0][homo_idx_alpha + 1]
energy_alpha_gap = energy_alpha_lumo - energy_alpha_homo
energy_beta_homo = self.ccdata.moenergies[-1][homo_idx_beta]
energy_beta_lumo = self.ccdata.moenergies[-1][homo_idx_beta + 1]
energy_beta_gap = energy_beta_lumo - energy_beta_homo
cjson_dict['energy'] = dict()
cjson_dict['energy']['total'] = self.ccdata.scfenergies[-1]
cjson_dict['energy']['alpha'] = dict()
cjson_dict['energy']['alpha']['homo'] = energy_alpha_homo
cjson_dict['energy']['alpha']['lumo'] = energy_alpha_lumo
cjson_dict['energy']['alpha']['gap'] = energy_alpha_gap
cjson_dict['energy']['beta'] = dict()
cjson_dict['energy']['beta']['homo'] = energy_beta_homo
cjson_dict['energy']['beta']['lumo'] = energy_beta_lumo
cjson_dict['energy']['beta']['gap'] = energy_beta_gap
cjson_dict['totalDipoleMoment'] = self._calculate_total_dipole_moment()
# Can/should we append the entire original log file?
# cjson_dict['files'] = dict()
# cjson_dict['files']['log'] = []
# cjson_dict['files']['log'].append()
return json.dumps(cjson_dict)
if __name__ == "__main__":
pass
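# Minimal usage sketch (assumes `data` is a parsed ccData instance obtained
# elsewhere; illustrative only):
#
# writer = CJSON(data)
# cjson_string = writer.generate_repr()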
| ghutchis/cclib | src/cclib/writer/cjsonwriter.py | Python | lgpl-2.1 | 4,465 | 0.000896 |
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from unittest import TestCase
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn import hmm
from sklearn import mixture
from sklearn.utils.extmath import logsumexp
from sklearn.utils import check_random_state
from nose import SkipTest
rng = np.random.RandomState(0)
np.seterr(all='warn')
class TestBaseHMM(TestCase):
def setUp(self):
self.prng = np.random.RandomState(9)
class StubHMM(hmm._BaseHMM):
def _compute_log_likelihood(self, X):
return self.framelogprob
def _generate_sample_from_state(self):
pass
def _init(self):
pass
def setup_example_hmm(self):
# Example from http://en.wikipedia.org/wiki/Forward-backward_algorithm
h = self.StubHMM(2)
h.transmat_ = [[0.7, 0.3], [0.3, 0.7]]
h.startprob_ = [0.5, 0.5]
framelogprob = np.log([[0.9, 0.2],
[0.9, 0.2],
[0.1, 0.8],
[0.9, 0.2],
[0.9, 0.2]])
# Add dummy observations to stub.
h.framelogprob = framelogprob
return h, framelogprob
def test_init(self):
h, framelogprob = self.setup_example_hmm()
for params in [('transmat_',), ('startprob_', 'transmat_')]:
d = dict((x[:-1], getattr(h, x)) for x in params)
h2 = self.StubHMM(h.n_components, **d)
self.assertEqual(h.n_components, h2.n_components)
for p in params:
assert_array_almost_equal(getattr(h, p), getattr(h2, p))
def test_do_forward_pass(self):
h, framelogprob = self.setup_example_hmm()
logprob, fwdlattice = h._do_forward_pass(framelogprob)
reflogprob = -3.3725
self.assertAlmostEqual(logprob, reflogprob, places=4)
reffwdlattice = np.array([[0.4500, 0.1000],
[0.3105, 0.0410],
[0.0230, 0.0975],
[0.0408, 0.0150],
[0.0298, 0.0046]])
assert_array_almost_equal(np.exp(fwdlattice), reffwdlattice, 4)
def test_do_backward_pass(self):
h, framelogprob = self.setup_example_hmm()
bwdlattice = h._do_backward_pass(framelogprob)
refbwdlattice = np.array([[0.0661, 0.0455],
[0.0906, 0.1503],
[0.4593, 0.2437],
[0.6900, 0.4100],
[1.0000, 1.0000]])
assert_array_almost_equal(np.exp(bwdlattice), refbwdlattice, 4)
def test_do_viterbi_pass(self):
h, framelogprob = self.setup_example_hmm()
logprob, state_sequence = h._do_viterbi_pass(framelogprob)
refstate_sequence = [0, 0, 1, 0, 0]
assert_array_equal(state_sequence, refstate_sequence)
reflogprob = -4.4590
self.assertAlmostEqual(logprob, reflogprob, places=4)
def test_eval(self):
h, framelogprob = self.setup_example_hmm()
nobs = len(framelogprob)
logprob, posteriors = h.eval([])
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
reflogprob = -3.3725
self.assertAlmostEqual(logprob, reflogprob, places=4)
refposteriors = np.array([[0.8673, 0.1327],
[0.8204, 0.1796],
[0.3075, 0.6925],
[0.8204, 0.1796],
[0.8673, 0.1327]])
assert_array_almost_equal(posteriors, refposteriors, decimal=4)
def test_hmm_eval_consistent_with_gmm(self):
n_components = 8
nobs = 10
h = self.StubHMM(n_components)
# Add dummy observations to stub.
framelogprob = np.log(self.prng.rand(nobs, n_components))
h.framelogprob = framelogprob
# If startprob and transmat are uniform across all states (the
# default), the transitions are uninformative - the model
# reduces to a GMM with uniform mixing weights (in terms of
# posteriors, not likelihoods).
logprob, hmmposteriors = h.eval([])
assert_array_almost_equal(hmmposteriors.sum(axis=1), np.ones(nobs))
norm = logsumexp(framelogprob, axis=1)[:, np.newaxis]
gmmposteriors = np.exp(framelogprob - np.tile(norm, (1, n_components)))
assert_array_almost_equal(hmmposteriors, gmmposteriors)
def test_hmm_decode_consistent_with_gmm(self):
n_components = 8
nobs = 10
h = self.StubHMM(n_components)
# Add dummy observations to stub.
framelogprob = np.log(self.prng.rand(nobs, n_components))
h.framelogprob = framelogprob
# If startprob and transmat are uniform across all states (the
# default), the transitions are uninformative - the model
# reduces to a GMM with uniform mixing weights (in terms of
# posteriors, not likelihoods).
viterbi_ll, state_sequence = h.decode([])
norm = logsumexp(framelogprob, axis=1)[:, np.newaxis]
gmmposteriors = np.exp(framelogprob - np.tile(norm, (1, n_components)))
gmmstate_sequence = gmmposteriors.argmax(axis=1)
assert_array_equal(state_sequence, gmmstate_sequence)
def test_base_hmm_attributes(self):
n_components = 20
startprob = self.prng.rand(n_components)
startprob = startprob / startprob.sum()
transmat = self.prng.rand(n_components, n_components)
transmat /= np.tile(transmat.sum(axis=1)
[:, np.newaxis], (1, n_components))
h = self.StubHMM(n_components)
self.assertEquals(h.n_components, n_components)
h.startprob_ = startprob
assert_array_almost_equal(h.startprob_, startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
2 * startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
np.zeros((n_components - 2, 2)))
h.transmat_ = transmat
assert_array_almost_equal(h.transmat_, transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
2 * transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
np.zeros((n_components - 2, n_components)))
def train_hmm_and_keep_track_of_log_likelihood(hmm, obs, n_iter=1, **kwargs):
hmm.n_iter = 1
hmm.fit(obs)
loglikelihoods = []
for n in xrange(n_iter):
hmm.n_iter = 1
hmm.init_params = ''
hmm.fit(obs)
loglikelihoods.append(sum(hmm.score(x) for x in obs))
return loglikelihoods
class GaussianHMMBaseTester(object):
def setUp(self):
self.prng = prng = np.random.RandomState(10)
self.n_components = n_components = 3
self.n_features = n_features = 3
self.startprob = prng.rand(n_components)
self.startprob = self.startprob / self.startprob.sum()
self.transmat = prng.rand(n_components, n_components)
self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
(1, n_components))
self.means = prng.randint(-20, 20, (n_components, n_features))
self.covars = {
'spherical': (1.0 + 2 * np.dot(prng.rand(n_components, 1),
np.ones((1, n_features)))) ** 2,
'tied': (make_spd_matrix(n_features, random_state=0)
+ np.eye(n_features)),
'diag': (1.0 + 2 * prng.rand(n_components, n_features)) ** 2,
'full': np.array([make_spd_matrix(n_features, random_state=0)
+ np.eye(n_features)
for x in range(n_components)]),
}
self.expanded_covars = {
'spherical': [np.eye(n_features) * cov
for cov in self.covars['spherical']],
'diag': [np.diag(cov) for cov in self.covars['diag']],
'tied': [self.covars['tied']] * n_components,
'full': self.covars['full'],
}
def test_bad_covariance_type(self):
hmm.GaussianHMM(20, self.covariance_type)
self.assertRaises(ValueError, hmm.GaussianHMM, 20,
'badcovariance_type')
def _test_attributes(self):
# XXX: This test is bugged and creates weird errors -- skipped
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
self.assertEquals(h.n_components, self.n_components)
self.assertEquals(h.covariance_type, self.covariance_type)
h.startprob_ = self.startprob
assert_array_almost_equal(h.startprob_, self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
2 * self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
np.zeros((self.n_components - 2, self.n_features)))
h.transmat_ = self.transmat
assert_array_almost_equal(h.transmat_, self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
2 * self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
np.zeros((self.n_components - 2, self.n_components)))
h.means_ = self.means
self.assertEquals(h.n_features, self.n_features)
self.assertRaises(ValueError, h.__setattr__, 'means_', [])
self.assertRaises(ValueError, h.__setattr__, 'means_',
np.zeros((self.n_components - 2, self.n_features)))
h.covars_ = self.covars[self.covariance_type]
assert_array_almost_equal(h.covars_,
self.expanded_covars[self.covariance_type])
#self.assertRaises(ValueError, h.__setattr__, 'covars', [])
#self.assertRaises(ValueError, h.__setattr__, 'covars',
# np.zeros((self.n_components - 2, self.n_features)))
def test_eval_and_decode(self):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.means_ = self.means
h.covars_ = self.covars[self.covariance_type]
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
h.means_ = 20 * h.means_
gaussidx = np.repeat(range(self.n_components), 5)
nobs = len(gaussidx)
obs = self.prng.randn(nobs, self.n_features) + h.means_[gaussidx]
ll, posteriors = h.eval(obs)
self.assertEqual(posteriors.shape, (nobs, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
viterbi_ll, stateseq = h.decode(obs)
assert_array_equal(stateseq, gaussidx)
def test_sample(self, n=1000):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
h.means_ = 20 * self.means
h.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
h.startprob_ = self.startprob
samples = h.sample(n)[0]
self.assertEquals(samples.shape, (n, self.n_features))
def test_fit(self, params='stmc', n_iter=5, verbose=False, **kwargs):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.startprob_ = self.startprob
h.transmat_ = hmm.normalize(self.transmat
+ np.diag(self.prng.rand(self.n_components)), 1)
h.means_ = 20 * self.means
h.covars_ = self.covars[self.covariance_type]
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in xrange(10)]
# Mess up the parameters and see if we can re-learn them.
h.n_iter = 0
h.fit(train_obs)
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print
print ('Test train: %s (%s)\n %s\n %s'
% (self.covariance_type, params, trainll, np.diff(trainll)))
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > -0.8,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, -0.8, self.covariance_type, trainll))
def test_fit_works_on_sequences_of_different_length(self):
obs = [self.prng.rand(3, self.n_features),
self.prng.rand(4, self.n_features),
self.prng.rand(5, self.n_features)]
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
# This shouldn't raise
# ValueError: setting an array element with a sequence.
h.fit(obs)
def test_fit_with_priors(self, params='stmc', n_iter=5, verbose=False):
startprob_prior = 10 * self.startprob + 2.0
transmat_prior = 10 * self.transmat + 2.0
means_prior = self.means
means_weight = 2.0
covars_weight = 2.0
if self.covariance_type in ('full', 'tied'):
covars_weight += self.n_features
covars_prior = self.covars[self.covariance_type]
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.startprob_ = self.startprob
h.startprob_prior = startprob_prior
h.transmat_ = hmm.normalize(self.transmat
+ np.diag(self.prng.rand(self.n_components)), 1)
h.transmat_prior = transmat_prior
h.means_ = 20 * self.means
h.means_prior = means_prior
h.means_weight = means_weight
h.covars_ = self.covars[self.covariance_type]
h.covars_prior = covars_prior
h.covars_weight = covars_weight
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in xrange(10)]
# Mess up the parameters and see if we can re-learn them.
h.n_iter = 0
h.fit(train_obs[:1])
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print
print ('Test MAP train: %s (%s)\n %s\n %s'
% (self.covariance_type, params, trainll, np.diff(trainll)))
# XXX: Why such a large tolerance?
self.assertTrue(np.all(np.diff(trainll) > -0.5))
class TestGaussianHMMWithSphericalCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'spherical'
def test_fit_startprob_and_transmat(self):
self.test_fit('st')
class TestGaussianHMMWithDiagonalCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'diag'
class TestGaussianHMMWithTiedCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'tied'
class TestGaussianHMMWithFullCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'full'
class MultinomialHMMTestCase(TestCase):
"""Using examples from Wikipedia
- http://en.wikipedia.org/wiki/Hidden_Markov_model
- http://en.wikipedia.org/wiki/Viterbi_algorithm
"""
def setUp(self):
self.prng = np.random.RandomState(9)
self.n_components = 2 # ('Rainy', 'Sunny')
self.n_symbols = 3 # ('walk', 'shop', 'clean')
self.emissionprob = [[0.1, 0.4, 0.5], [0.6, 0.3, 0.1]]
self.startprob = [0.6, 0.4]
self.transmat = [[0.7, 0.3], [0.4, 0.6]]
self.h = hmm.MultinomialHMM(self.n_components,
startprob=self.startprob,
transmat=self.transmat)
self.h.emissionprob_ = self.emissionprob
def test_wikipedia_viterbi_example(self):
# From http://en.wikipedia.org/wiki/Viterbi_algorithm:
# "This reveals that the observations ['walk', 'shop', 'clean']
# were most likely generated by states ['Sunny', 'Rainy',
# 'Rainy'], with probability 0.01344."
observations = [0, 1, 2]
logprob, state_sequence = self.h.decode(observations)
self.assertAlmostEqual(np.exp(logprob), 0.01344)
assert_array_equal(state_sequence, [1, 0, 0])
def test_attributes(self):
h = hmm.MultinomialHMM(self.n_components)
self.assertEquals(h.n_components, self.n_components)
h.startprob_ = self.startprob
assert_array_almost_equal(h.startprob_, self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
2 * self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
np.zeros((self.n_components - 2, self.n_symbols)))
h.transmat_ = self.transmat
assert_array_almost_equal(h.transmat_, self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
2 * self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
np.zeros((self.n_components - 2, self.n_components)))
h.emissionprob_ = self.emissionprob
assert_array_almost_equal(h.emissionprob_, self.emissionprob)
self.assertRaises(ValueError, h.__setattr__, 'emissionprob_', [])
self.assertRaises(ValueError, h.__setattr__, 'emissionprob_',
np.zeros((self.n_components - 2, self.n_symbols)))
self.assertEquals(h.n_symbols, self.n_symbols)
def test_eval(self):
idx = np.repeat(range(self.n_components), 10)
nobs = len(idx)
obs = [int(x) for x in np.floor(self.prng.rand(nobs) * self.n_symbols)]
ll, posteriors = self.h.eval(obs)
self.assertEqual(posteriors.shape, (nobs, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
def test_sample(self, n=1000):
samples = self.h.sample(n)[0]
self.assertEquals(len(samples), n)
self.assertEquals(len(np.unique(samples)), self.n_symbols)
def test_fit(self, params='ste', n_iter=5, verbose=False, **kwargs):
h = self.h
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in xrange(10)]
# Mess up the parameters and see if we can re-learn them.
h.startprob_ = hmm.normalize(self.prng.rand(self.n_components))
h.transmat_ = hmm.normalize(self.prng.rand(self.n_components,
self.n_components), axis=1)
h.emissionprob_ = hmm.normalize(
self.prng.rand(self.n_components, self.n_symbols), axis=1)
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print
print 'Test train: (%s)\n %s\n %s' % (params, trainll,
np.diff(trainll))
self.assertTrue(np.all(np.diff(trainll) > - 1.e-3))
def test_fit_emissionprob(self):
self.test_fit('e')
def create_random_gmm(n_mix, n_features, covariance_type, prng=0):
prng = check_random_state(prng)
g = mixture.GMM(n_mix, covariance_type=covariance_type)
g.means_ = prng.randint(-20, 20, (n_mix, n_features))
mincv = 0.1
g.covars_ = {
'spherical': (mincv + mincv * np.dot(prng.rand(n_mix, 1),
np.ones((1, n_features)))) ** 2,
'tied': (make_spd_matrix(n_features, random_state=prng)
+ mincv * np.eye(n_features)),
'diag': (mincv + mincv * prng.rand(n_mix, n_features)) ** 2,
'full': np.array(
[make_spd_matrix(n_features, random_state=prng)
+ mincv * np.eye(n_features) for x in xrange(n_mix)])
}[covariance_type]
g.weights_ = hmm.normalize(prng.rand(n_mix))
return g
class GMMHMMBaseTester(object):
def setUp(self):
self.prng = np.random.RandomState(9)
self.n_components = 3
self.n_mix = 2
self.n_features = 2
self.covariance_type = 'diag'
self.startprob = self.prng.rand(self.n_components)
self.startprob = self.startprob / self.startprob.sum()
self.transmat = self.prng.rand(self.n_components, self.n_components)
self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
(1, self.n_components))
self.gmms = []
for state in xrange(self.n_components):
self.gmms.append(create_random_gmm(
self.n_mix, self.n_features, self.covariance_type,
prng=self.prng))
def test_attributes(self):
h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
self.assertEquals(h.n_components, self.n_components)
h.startprob_ = self.startprob
assert_array_almost_equal(h.startprob_, self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
2 * self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
np.zeros((self.n_components - 2, self.n_features)))
h.transmat_ = self.transmat
assert_array_almost_equal(h.transmat_, self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
2 * self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
np.zeros((self.n_components - 2, self.n_components)))
def test_eval_and_decode(self):
h = hmm.GMMHMM(self.n_components, gmms=self.gmms)
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
for g in h.gmms:
g.means_ *= 20
refstateseq = np.repeat(range(self.n_components), 5)
nobs = len(refstateseq)
obs = [h.gmms[x].sample(1).flatten() for x in refstateseq]
ll, posteriors = h.eval(obs)
self.assertEqual(posteriors.shape, (nobs, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
viterbi_ll, stateseq = h.decode(obs)
assert_array_equal(stateseq, refstateseq)
def test_sample(self, n=1000):
h = hmm.GMMHMM(self.n_components, self.covariance_type,
startprob=self.startprob, transmat=self.transmat,
gmms=self.gmms)
samples = h.sample(n)[0]
self.assertEquals(samples.shape, (n, self.n_features))
def test_fit(self, params='stmwc', n_iter=5, verbose=False, **kwargs):
h = hmm.GMMHMM(self.n_components, covars_prior=1.0)
h.startprob_ = self.startprob
h.transmat_ = hmm.normalize(
self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
h.gmms = self.gmms
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10,
random_state=self.prng)[0] for x in xrange(10)]
# Mess up the parameters and see if we can re-learn them.
h.n_iter = 0
h.fit(train_obs)
h.transmat_ = hmm.normalize(self.prng.rand(self.n_components,
self.n_components), axis=1)
h.startprob_ = hmm.normalize(self.prng.rand(self.n_components))
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params)[1:]
if not np.all(np.diff(trainll) > 0) and verbose:
print
print 'Test train: (%s)\n %s\n %s' % (params, trainll,
np.diff(trainll))
# XXX: this test appears to check that training log likelihood should
# never be decreasing (up to a tolerance of 0.5, why?) but this is not
# the case when the seed changes.
raise SkipTest("Unstable test: trainll is not always increasing "
"depending on seed")
self.assertTrue(np.all(np.diff(trainll) > -0.5))
def test_fit_works_on_sequences_of_different_length(self):
obs = [self.prng.rand(3, self.n_features),
self.prng.rand(4, self.n_features),
self.prng.rand(5, self.n_features)]
h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
# This shouldn't raise
# ValueError: setting an array element with a sequence.
h.fit(obs)
class TestGMMHMMWithDiagCovars(GMMHMMBaseTester, TestCase):
covariance_type = 'diag'
def test_fit_startprob_and_transmat(self):
self.test_fit('st')
def test_fit_means(self):
self.test_fit('m')
class TestGMMHMMWithTiedCovars(GMMHMMBaseTester, TestCase):
covariance_type = 'tied'
class TestGMMHMMWithFullCovars(GMMHMMBaseTester, TestCase):
covariance_type = 'full'
def test_normalize_1D():
A = rng.rand(2) + 1.0
for axis in range(1):
Anorm = hmm.normalize(A, axis)
assert np.all(np.allclose(Anorm.sum(axis), 1.0))
def test_normalize_3D():
A = rng.rand(2, 2, 2) + 1.0
for axis in range(3):
Anorm = hmm.normalize(A, axis)
assert np.all(np.allclose(Anorm.sum(axis), 1.0))
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/sklearn/tests/test_hmm.py
|
Python
|
agpl-3.0
| 26,383
| 0.000493
|
from functools import partial
from navmazing import NavigateToSibling, NavigateToAttribute
from cfme import Credential
from cfme.exceptions import CandidateNotFound, OptionNotAvailable
import cfme.fixtures.pytest_selenium as sel
import cfme.web_ui.toolbar as tb
from cfme.web_ui import (
AngularSelect, Form, Select, CheckboxTree, accordion, fill, flash,
form_buttons, Input, Table, UpDownSelect, CFMECheckbox, BootstrapTreeview)
from cfme.web_ui.form_buttons import change_stored_password
from utils import version
from utils.appliance import Navigatable
from utils.appliance.implementations.ui import navigator, CFMENavigateStep, navigate_to
from utils.log import logger
from utils.pretty import Pretty
from utils.update import Updateable
tb_select = partial(tb.select, "Configuration")
pol_btn = partial(tb.select, "Policy")
edit_tags_form = Form(
fields=[
("select_tag", Select("select#tag_cat")),
("select_value", Select("select#tag_add"))
])
tag_table = Table("//div[@id='assignments_div']//table")
users_table = Table("//div[@id='records_div']//table")
group_order_selector = UpDownSelect(
"select#seq_fields",
"//img[@alt='Move selected fields up']",
"//img[@alt='Move selected fields down']")
def simple_user(userid, password):
creds = Credential(principal=userid, secret=password)
return User(name=userid, credential=creds)
class User(Updateable, Pretty, Navigatable):
user_form = Form(
fields=[
('name_txt', Input('name')),
('userid_txt', Input('userid')),
('password_txt', Input('password')),
('password_verify_txt', Input('verify')),
('email_txt', Input('email')),
('user_group_select', AngularSelect('chosen_group')),
])
pretty_attrs = ['name', 'group']
def __init__(self, name=None, credential=None, email=None, group=None, cost_center=None,
value_assign=None, appliance=None):
Navigatable.__init__(self, appliance=appliance)
self.name = name
self.credential = credential
self.email = email
self.group = group
self.cost_center = cost_center
self.value_assign = value_assign
self._restore_user = None
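    # Using a User instance as a context manager logs in as that user and
    # restores the previous appliance user on exit.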
def __enter__(self):
if self._restore_user != self.appliance.user:
from cfme.login import logout
logger.info('Switching to new user: %s', self.credential.principal)
self._restore_user = self.appliance.user
logout()
self.appliance.user = self
def __exit__(self, *args, **kwargs):
if self._restore_user != self.appliance.user:
from cfme.login import logout
logger.info('Restoring to old user: %s', self._restore_user.credential.principal)
logout()
self.appliance.user = self._restore_user
self._restore_user = None
def create(self):
navigate_to(self, 'Add')
fill(self.user_form, {'name_txt': self.name,
'userid_txt': self.credential.principal,
'password_txt': self.credential.secret,
'password_verify_txt': self.credential.verify_secret,
'email_txt': self.email,
'user_group_select': getattr(self.group,
'description', None)},
action=form_buttons.add)
flash.assert_success_message('User "{}" was saved'.format(self.name))
def update(self, updates):
navigate_to(self, 'Edit')
change_stored_password()
new_updates = {}
if 'credential' in updates:
new_updates.update({
'userid_txt': updates.get('credential').principal,
'password_txt': updates.get('credential').secret,
'password_verify_txt': updates.get('credential').verify_secret
})
if self.appliance.version >= '5.7':
self.name = updates.get('credential').principal
new_updates.update({
'name_txt': updates.get('name'),
'email_txt': updates.get('email'),
'user_group_select': getattr(
updates.get('group'),
'description', None)
})
fill(self.user_form, new_updates, action=form_buttons.save)
flash.assert_success_message(
'User "{}" was saved'.format(updates.get('name', self.name)))
def copy(self):
navigate_to(self, 'Details')
tb.select('Configuration', 'Copy this User to a new User')
new_user = User(name=self.name + "copy",
credential=Credential(principal='redhat', secret='redhat'))
change_stored_password()
fill(self.user_form, {'name_txt': new_user.name,
'userid_txt': new_user.credential.principal,
'password_txt': new_user.credential.secret,
'password_verify_txt': new_user.credential.verify_secret},
action=form_buttons.add)
flash.assert_success_message('User "{}" was saved'.format(new_user.name))
return new_user
def delete(self):
navigate_to(self, 'Details')
tb.select('Configuration', 'Delete this User', invokes_alert=True)
sel.handle_alert()
flash.assert_success_message('EVM User "{}": Delete successful'.format(self.name))
def edit_tags(self, tag, value):
navigate_to(self, 'Details')
pol_btn("Edit 'My Company' Tags for this User", invokes_alert=True)
fill(edit_tags_form, {'select_tag': tag,
'select_value': value},
action=form_buttons.save)
flash.assert_success_message('Tag edits were successfully saved')
def remove_tag(self, tag, value):
navigate_to(self, 'Details')
pol_btn("Edit 'My Company' Tags for this User", invokes_alert=True)
row = tag_table.find_row_by_cells({'category': tag, 'assigned_value': value},
partial_check=True)
sel.click(row[0])
form_buttons.save()
flash.assert_success_message('Tag edits were successfully saved')
@property
def exists(self):
try:
navigate_to(self, 'Details')
return True
except CandidateNotFound:
return False
@property
def description(self):
return self.credential.principal
@navigator.register(User, 'All')
class UserAll(CFMENavigateStep):
prerequisite = NavigateToAttribute('appliance.server', 'Configuration')
def step(self):
accordion.tree(
"Access Control",
self.obj.appliance.server.zone.region.settings_string, "Users")
def resetter(self):
accordion.refresh("Access Control")
@navigator.register(User, 'Add')
class UserAdd(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
tb_select("Add a new User")
@navigator.register(User, 'Details')
class UserDetails(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
accordion.tree(
"Access Control",
self.obj.appliance.server.zone.region.settings_string,
"Users",
self.obj.name
)
def resetter(self):
accordion.refresh("Access Control")
@navigator.register(User, 'Edit')
class UserEdit(CFMENavigateStep):
prerequisite = NavigateToSibling('Details')
def step(self):
tb_select('Edit this User')
class Group(Updateable, Pretty, Navigatable):
group_form = Form(
fields=[
('ldap_groups_for_user', AngularSelect("ldap_groups_user")),
('description_txt', Input('description')),
('lookup_ldap_groups_chk', Input('lookup')),
('role_select', AngularSelect("group_role")),
('group_tenant', AngularSelect("group_tenant"), {"appeared_in": "5.5"}),
('user_to_look_up', Input('user')),
('username', Input('user_id')),
('password', Input('password')),
])
pretty_attrs = ['description', 'role']
def __init__(self, description=None, role=None, tenant="My Company", user_to_lookup=None,
ldap_credentials=None, appliance=None):
Navigatable.__init__(self, appliance=appliance)
self.description = description
self.role = role
self.tenant = tenant
self.ldap_credentials = ldap_credentials
self.user_to_lookup = user_to_lookup
def create(self):
navigate_to(self, 'Add')
fill(self.group_form, {'description_txt': self.description,
'role_select': self.role,
'group_tenant': self.tenant},
action=form_buttons.add)
flash.assert_success_message('Group "{}" was saved'.format(self.description))
def _retrieve_ldap_user_groups(self):
navigate_to(self, 'Add')
fill(self.group_form, {'lookup_ldap_groups_chk': True,
'user_to_look_up': self.user_to_lookup,
'username': self.ldap_credentials.principal,
'password': self.ldap_credentials.secret,
},)
sel.wait_for_element(form_buttons.retrieve)
sel.click(form_buttons.retrieve)
def _retrieve_ext_auth_user_groups(self):
navigate_to(self, 'Add')
fill(self.group_form, {'lookup_ldap_groups_chk': True,
'user_to_look_up': self.user_to_lookup,
},)
sel.wait_for_element(form_buttons.retrieve)
sel.click(form_buttons.retrieve)
def add_group_from_ldap_lookup(self):
self._retrieve_ldap_user_groups()
fill(self.group_form, {'ldap_groups_for_user': self.description,
'description_txt': self.description,
'role_select': self.role,
'group_tenant': self.tenant,
},
action=form_buttons.add)
flash.assert_success_message('Group "{}" was saved'.format(self.description))
def add_group_from_ext_auth_lookup(self):
self._retrieve_ext_auth_user_groups()
fill(self.group_form, {'ldap_groups_for_user': self.description,
'description_txt': self.description,
'role_select': self.role,
'group_tenant': self.tenant,
},
action=form_buttons.add)
flash.assert_success_message('Group "{}" was saved'.format(self.description))
def update(self, updates):
navigate_to(self, 'Edit')
fill(self.group_form, {'description_txt': updates.get('description'),
'role_select': updates.get('role'),
'group_tenant': updates.get('tenant')},
action=form_buttons.save)
flash.assert_success_message(
'Group "{}" was saved'.format(updates.get('description', self.description)))
def delete(self):
navigate_to(self, 'Details')
tb_select('Delete this Group', invokes_alert=True)
sel.handle_alert()
flash.assert_success_message('EVM Group "{}": Delete successful'.format(self.description))
def edit_tags(self, tag, value):
navigate_to(self, 'Details')
pol_btn("Edit 'My Company' Tags for this Group", invokes_alert=True)
fill(edit_tags_form, {'select_tag': tag,
'select_value': value},
action=form_buttons.save)
flash.assert_success_message('Tag edits were successfully saved')
def remove_tag(self, tag, value):
navigate_to(self, 'Details')
pol_btn("Edit 'My Company' Tags for this Group", invokes_alert=True)
row = tag_table.find_row_by_cells({'category': tag, 'assigned_value': value},
partial_check=True)
sel.click(row[0])
form_buttons.save()
flash.assert_success_message('Tag edits were successfully saved')
@property
def exists(self):
try:
navigate_to(self, 'Details')
return True
except CandidateNotFound:
return False
@navigator.register(Group, 'All')
class GroupAll(CFMENavigateStep):
prerequisite = NavigateToAttribute('appliance.server', 'Configuration')
def step(self):
accordion.tree("Access Control", self.obj.appliance.server_region_string(), "Groups")
def resetter(self):
accordion.refresh("Access Control")
@navigator.register(Group, 'Add')
class GroupAdd(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
tb_select("Add a new Group")
@navigator.register(Group, 'EditGroupSequence')
class EditGroupSequence(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
tb_select('Edit Sequence of User Groups for LDAP Look Up')
@navigator.register(Group, 'Details')
class GroupDetails(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
accordion.tree(
"Access Control", self.obj.appliance.server_region_string(),
"Groups", self.obj.description
)
def resetter(self):
accordion.refresh("Access Control")
@navigator.register(Group, 'Edit')
class GroupEdit(CFMENavigateStep):
prerequisite = NavigateToSibling('Details')
def step(self):
tb_select('Edit this Group')
def get_group_order():
navigate_to(Group, 'EditGroupSequence')
return group_order_selector.get_items()
def set_group_order(items):
original_order = get_group_order()
# We pick only the same amount of items for comparing
original_order = original_order[:len(items)]
if items == original_order:
return # Ignore that, would cause error on Save click
fill(group_order_selector, items)
sel.click(form_buttons.save)
class Role(Updateable, Pretty, Navigatable):
form = Form(
fields=[
('name_txt', Input('name')),
('vm_restriction_select', AngularSelect('vm_restriction')),
('product_features_tree', {
version.LOWEST: CheckboxTree("//div[@id='features_treebox']/ul"),
'5.7': BootstrapTreeview("features_treebox")}),
])
pretty_attrs = ['name', 'product_features']
def __init__(self, name=None, vm_restriction=None, product_features=None, appliance=None):
Navigatable.__init__(self, appliance=appliance)
self.name = name
self.vm_restriction = vm_restriction
self.product_features = product_features or []
def create(self):
navigate_to(self, 'Add')
fill(self.form, {'name_txt': self.name,
'vm_restriction_select': self.vm_restriction,
'product_features_tree': self.product_features},
action=form_buttons.add)
flash.assert_success_message('Role "{}" was saved'.format(self.name))
def update(self, updates):
navigate_to(self, 'Edit')
fill(self.form, {'name_txt': updates.get('name'),
'vm_restriction_select': updates.get('vm_restriction'),
'product_features_tree': updates.get('product_features')},
action=form_buttons.save)
flash.assert_success_message('Role "{}" was saved'.format(updates.get('name', self.name)))
def delete(self):
navigate_to(self, 'Details')
tb_select('Delete this Role', invokes_alert=True)
sel.handle_alert()
flash.assert_success_message('Role "{}": Delete successful'.format(self.name))
def copy(self, name=None):
if not name:
name = self.name + "copy"
navigate_to(self, 'Details')
tb.select('Configuration', 'Copy this Role to a new Role')
new_role = Role(name=name)
fill(self.form, {'name_txt': new_role.name},
action=form_buttons.add)
flash.assert_success_message('Role "{}" was saved'.format(new_role.name))
return new_role
@navigator.register(Role, 'All')
class RoleAll(CFMENavigateStep):
prerequisite = NavigateToAttribute('appliance.server', 'Configuration')
def step(self):
accordion.tree("Access Control", self.obj.appliance.server_region_string(), "Roles")
def resetter(self):
accordion.refresh("Access Control")
@navigator.register(Role, 'Add')
class RoleAdd(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
tb_select("Add a new Role")
@navigator.register(Role, 'Details')
class RoleDetails(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
accordion.tree(
"Access Control", self.obj.appliance.server_region_string(), "Roles", self.obj.name
)
def resetter(self):
accordion.refresh("Access Control")
@navigator.register(Role, 'Edit')
class RoleEdit(CFMENavigateStep):
prerequisite = NavigateToSibling('Details')
def step(self):
tb_select('Edit this Role')
class Tenant(Updateable, Pretty, Navigatable):
""" Class representing CFME tenants in the UI.
* Kudos to mfalesni *
The behaviour is shared with Project, which is the same except it cannot create more nested
tenants/projects.
Args:
name: Name of the tenant
description: Description of the tenant
parent_tenant: Parent tenant, can be None, can be passed as string or object
"""
save_changes = form_buttons.FormButton("Save changes")
# TODO:
    # Temporarily defining elements with "//input" because Input() is not working.
    # Seems to be an issue with the html elements; looking into it.
quota_form = Form(
fields=[
('cpu_cb', CFMECheckbox('cpu_allocated')),
('cpu_txt', "//input[@id='id_cpu_allocated']"),
('memory_cb', CFMECheckbox('mem_allocated')),
('memory_txt', "//input[@id='id_mem_allocated']"),
('storage_cb', CFMECheckbox('storage_allocated')),
('storage_txt', "//input[@id='id_storage_allocated']"),
('vm_cb', CFMECheckbox('vms_allocated')),
('vm_txt', "//input[@id='id_vms_allocated']"),
('template_cb', CFMECheckbox('templates_allocated')),
('template_txt', "//input[@id='id_templates_allocated']")
])
tenant_form = Form(
fields=[
('name', Input('name')),
('description', Input('description'))
])
pretty_attrs = ["name", "description"]
@classmethod
def get_root_tenant(cls):
return cls(name="My Company", _default=True)
def __init__(self, name=None, description=None, parent_tenant=None, _default=False,
appliance=None):
Navigatable.__init__(self, appliance=appliance)
self.name = name
self.description = description
self.parent_tenant = parent_tenant
self._default = _default
@property
def parent_tenant(self):
if self._default:
return None
if self._parent_tenant:
return self._parent_tenant
return self.get_root_tenant()
@parent_tenant.setter
def parent_tenant(self, tenant):
if tenant is not None and isinstance(tenant, Project):
            # If we try to set a Project as the parent tenant, refuse it
raise ValueError("Project cannot be a parent object.")
if isinstance(tenant, basestring):
# If parent tenant is passed as string,
# we assume that tenant name was passed instead of object
tenant = Tenant(tenant)
self._parent_tenant = tenant
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
else:
return self.name == other.name
@property
def exists(self):
try:
navigate_to(self, 'Details')
return True
except CandidateNotFound:
return False
@property
def tree_path(self):
if self._default:
return [self.name]
else:
return self.parent_tenant.tree_path + [self.name]
@property
def parent_path(self):
return self.tree_path[:-1]
def create(self, cancel=False):
if self._default:
raise ValueError("Cannot create the root tenant {}".format(self.name))
navigate_to(self, 'Add')
fill(self.tenant_form, self, action=form_buttons.add)
if type(self) is Tenant:
flash.assert_success_message('Tenant "{}" was saved'.format(self.name))
elif type(self) is Project:
flash.assert_success_message('Project "{}" was saved'.format(self.name))
else:
raise TypeError(
                'No Tenant or Project class passed to create method: {}'.format(
type(self).__name__))
def update(self, updates):
navigate_to(self, 'Edit')
        # Workaround - the form appears after a short delay
sel.wait_for_element(self.tenant_form.description)
fill(self.tenant_form, updates, action=self.save_changes)
flash.assert_success_message(
'Project "{}" was saved'.format(updates.get('name', self.name)))
def delete(self, cancel=False):
navigate_to(self, 'Details')
tb_select("Delete this item", invokes_alert=True)
sel.handle_alert(cancel=cancel)
flash.assert_success_message('Tenant "{}": Delete successful'.format(self.description))
def set_quota(self, **kwargs):
navigate_to(self, 'ManageQuotas')
        # Workaround - the form appears after a short delay
sel.wait_for_element(self.quota_form.cpu_txt)
fill(self.quota_form, {'cpu_cb': kwargs.get('cpu_cb'),
'cpu_txt': kwargs.get('cpu'),
'memory_cb': kwargs.get('memory_cb'),
'memory_txt': kwargs.get('memory'),
'storage_cb': kwargs.get('storage_cb'),
'storage_txt': kwargs.get('storage'),
'vm_cb': kwargs.get('vm_cb'),
'vm_txt': kwargs.get('vm'),
'template_cb': kwargs.get('template_cb'),
'template_txt': kwargs.get('template')},
action=self.save_changes)
flash.assert_success_message('Quotas for Tenant "{}" were saved'.format(self.name))
@navigator.register(Tenant, 'All')
class TenantAll(CFMENavigateStep):
prerequisite = NavigateToAttribute('appliance.server', 'Configuration')
def step(self):
accordion.tree("Access Control", self.obj.appliance.server_region_string(), "Tenants")
def resetter(self):
accordion.refresh("Access Control")
@navigator.register(Tenant, 'Details')
class TenantDetails(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self, *args, **kwargs):
accordion.tree(
"Access Control", self.obj.appliance.server_region_string(),
"Tenants", *self.obj.tree_path
)
@navigator.register(Tenant, 'Add')
class TenantAdd(CFMENavigateStep):
def prerequisite(self, *args, **kwargs):
navigate_to(self.obj.parent_tenant, 'Details')
def step(self, *args, **kwargs):
if isinstance(self.obj, Tenant):
add_selector = 'Add child Tenant to this Tenant'
elif isinstance(self.obj, Project):
add_selector = 'Add Project to this Tenant'
else:
raise OptionNotAvailable('Object type unsupported for Tenant Add: {}'
.format(type(self.obj).__name__))
tb.select('Configuration', add_selector)
@navigator.register(Tenant, 'Edit')
class TenantEdit(CFMENavigateStep):
prerequisite = NavigateToSibling('Details')
def step(self, *args, **kwargs):
tb.select('Configuration', 'Edit this item')
@navigator.register(Tenant, 'ManageQuotas')
class TenantManageQuotas(CFMENavigateStep):
prerequisite = NavigateToSibling('Details')
def step(self, *args, **kwargs):
tb.select('Configuration', 'Manage Quotas')
class Project(Tenant):
""" Class representing CFME projects in the UI.
Project cannot create more child tenants/projects.
Args:
name: Name of the project
description: Description of the project
parent_tenant: Parent project, can be None, can be passed as string or object
"""
pass
|
jdemon519/cfme_tests
|
cfme/configure/access_control.py
|
Python
|
gpl-2.0
| 24,877
| 0.001729
|
"""
Signals for user profiles
"""
from django.conf import settings
from django.db import transaction
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from discussions import tasks
from profiles.models import Profile
from roles.models import Role
from roles.roles import Permissions
@receiver(post_save, sender=Profile, dispatch_uid="sync_user_profile")
def sync_user_profile(sender, instance, created, **kwargs): # pylint: disable=unused-argument
"""
    Signal handler to create/update a DiscussionUser every time a profile is created/updated
"""
if not settings.FEATURES.get('OPEN_DISCUSSIONS_USER_SYNC', False):
return
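    # Defer the Celery task until the surrounding transaction commits so the
    # worker sees the saved Profile row.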
transaction.on_commit(lambda: tasks.sync_discussion_user.delay(instance.user_id))
@receiver(post_save, sender=Role, dispatch_uid="add_staff_as_moderator")
def add_staff_as_moderator(sender, instance, created, **kwargs): # pylint: disable=unused-argument
"""
    Signal handler to add a user as moderator when their staff role on a program is added
"""
if not settings.FEATURES.get('OPEN_DISCUSSIONS_USER_SYNC', False):
return
if instance.role not in Role.permission_to_roles[Permissions.CAN_CREATE_FORUMS]:
return
transaction.on_commit(
lambda: tasks.add_user_as_moderator_to_channel.delay(
instance.user_id,
instance.program_id,
)
)
@receiver(post_delete, sender=Role, dispatch_uid="delete_staff_as_moderator")
def delete_staff_as_moderator(sender, instance, **kwargs): # pylint: disable=unused-argument
"""
    Signal handler to remove a user as moderator when their staff role on a program is deleted
"""
if not settings.FEATURES.get('OPEN_DISCUSSIONS_USER_SYNC', False):
return
if instance.role not in Role.permission_to_roles[Permissions.CAN_CREATE_FORUMS]:
return
transaction.on_commit(
lambda: tasks.remove_user_as_moderator_from_channel.delay(
instance.user_id,
instance.program_id,
)
)
|
mitodl/micromasters
|
discussions/signals.py
|
Python
|
bsd-3-clause
| 2,043
| 0.004405
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.resources.types import ad
from google.ads.googleads.v8.services.types import ad_service
from google.rpc import status_pb2 # type: ignore
from .transports.base import AdServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import AdServiceGrpcTransport
class AdServiceClientMeta(type):
"""Metaclass for the AdService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[AdServiceTransport]]
_transport_registry["grpc"] = AdServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[AdServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class AdServiceClient(metaclass=AdServiceClientMeta):
"""Service to manage ads."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
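    # _get_default_mtls_endpoint is still a staticmethod descriptor while the
    # class body is executing, so call its underlying function via __func__.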
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> AdServiceTransport:
"""Return the transport used by the client instance.
Returns:
AdServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def ad_path(customer_id: str, ad_id: str,) -> str:
"""Return a fully-qualified ad string."""
return "customers/{customer_id}/ads/{ad_id}".format(
customer_id=customer_id, ad_id=ad_id,
)
@staticmethod
def parse_ad_path(path: str) -> Dict[str, str]:
"""Parse a ad path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/ads/(?P<ad_id>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, AdServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the ad service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.AdServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, AdServiceTransport):
# transport is a AdServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = AdServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_ad(
self,
request: ad_service.GetAdRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ad.Ad:
r"""Returns the requested ad in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.GetAdRequest`):
The request object. Request message for
[AdService.GetAd][google.ads.googleads.v8.services.AdService.GetAd].
resource_name (:class:`str`):
Required. The resource name of the ad
to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.resources.types.Ad:
An ad.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a ad_service.GetAdRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, ad_service.GetAdRequest):
request = ad_service.GetAdRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_ad]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
def mutate_ads(
self,
request: ad_service.MutateAdsRequest = None,
*,
customer_id: str = None,
operations: Sequence[ad_service.AdOperation] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ad_service.MutateAdsResponse:
r"""Updates ads. Operation statuses are returned. Updating ads is
not supported for TextAd, ExpandedDynamicSearchAd, GmailAd and
ImageAd.
List of thrown errors: `AdCustomizerError <>`__ `AdError <>`__
`AdSharingError <>`__ `AdxError <>`__ `AssetError <>`__
`AssetLinkError <>`__ `AuthenticationError <>`__
`AuthorizationError <>`__ `CollectionSizeError <>`__
`DatabaseError <>`__ `DateError <>`__ `DistinctError <>`__
`FeedAttributeReferenceError <>`__ `FieldError <>`__
`FieldMaskError <>`__ `FunctionError <>`__
`FunctionParsingError <>`__ `HeaderError <>`__ `IdError <>`__
`ImageError <>`__ `InternalError <>`__ `ListOperationError <>`__
`MediaBundleError <>`__ `MediaFileError <>`__ `MutateError <>`__
`NewResourceCreationError <>`__ `NotEmptyError <>`__
`NullError <>`__ `OperatorError <>`__ `PolicyFindingError <>`__
`PolicyViolationError <>`__ `QuotaError <>`__ `RangeError <>`__
`RequestError <>`__ `SizeLimitError <>`__
`StringFormatError <>`__ `StringLengthError <>`__
`UrlFieldError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.MutateAdsRequest`):
The request object. Request message for
[AdService.MutateAds][google.ads.googleads.v8.services.AdService.MutateAds].
customer_id (:class:`str`):
Required. The ID of the customer
whose ads are being modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (:class:`Sequence[google.ads.googleads.v8.services.types.AdOperation]`):
Required. The list of operations to
perform on individual ads.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.services.types.MutateAdsResponse:
Response message for an ad mutate.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([customer_id, operations]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a ad_service.MutateAdsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, ad_service.MutateAdsRequest):
request = ad_service.MutateAdsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.mutate_ads]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("AdServiceClient",)
|
googleads/google-ads-python
|
google/ads/googleads/v8/services/services/ad_service/client.py
|
Python
|
apache-2.0
| 21,991
| 0.000909
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.core.exceptions import PermissionDenied
from mock import Mock, patch
from nose.tools import eq_, raises
from oneanddone.base.tests import TestCase
from oneanddone.users.mixins import BaseUserProfileRequiredMixin, PrivacyPolicyRequiredMixin, MyStaffUserRequiredMixin
from oneanddone.users.tests import UserFactory, UserProfileFactory
class FakeMixin(object):
def dispatch(self, request, *args, **kwargs):
return 'fakemixin'
class FakeView(BaseUserProfileRequiredMixin, FakeMixin):
pass
class FakeViewNeedsPrivacyPolicy(PrivacyPolicyRequiredMixin, FakeMixin):
pass
class FakeViewNeedsStaff(MyStaffUserRequiredMixin, FakeMixin):
pass
class MyStaffUserRequiredMixinTests(TestCase):
def setUp(self):
self.view = FakeViewNeedsStaff()
def test_is_staff(self):
"""
If the user is staff, call the parent class's
dispatch method.
"""
request = Mock()
request.user = UserFactory.create(is_staff=True)
eq_(self.view.dispatch(request), 'fakemixin')
@raises(PermissionDenied)
def test_not_staff(self):
"""
If the user is not staff, raise a PermissionDenied exception.
"""
request = Mock()
request.user = UserFactory.create(is_staff=False)
self.view.dispatch(request)
class PrivacyPolicyRequiredMixinTests(TestCase):
def setUp(self):
self.view = FakeViewNeedsPrivacyPolicy()
def test_has_profile_and_accepts_privacy_policy(self):
"""
        If the user has created a profile and has accepted the privacy policy,
        call the parent class's dispatch method.
"""
request = Mock()
request.user = UserProfileFactory.create(privacy_policy_accepted=True).user
eq_(self.view.dispatch(request), 'fakemixin')
def test_has_profile_and_not_accepted_privacy_policy(self):
"""
        If the user has created a profile but has not accepted the privacy policy,
        redirect them to the profile update view.
"""
request = Mock()
request.user = UserProfileFactory.create(privacy_policy_accepted=False).user
with patch('oneanddone.users.mixins.redirect') as redirect:
eq_(self.view.dispatch(request), redirect.return_value)
redirect.assert_called_with('users.profile.update')
def test_no_profile(self):
"""
If the user hasn't created a profile, redirect them to the
profile creation view.
"""
request = Mock()
request.user = UserFactory.create()
with patch('oneanddone.users.mixins.redirect') as redirect:
eq_(self.view.dispatch(request), redirect.return_value)
redirect.assert_called_with('users.profile.create')
class UserProfileRequiredMixinTests(TestCase):
def setUp(self):
self.view = FakeView()
def test_has_profile(self):
"""
        If the user has created a profile and has accepted the privacy policy,
        call the parent class's dispatch method.
"""
request = Mock()
request.user = UserProfileFactory.create(privacy_policy_accepted=True).user
eq_(self.view.dispatch(request), 'fakemixin')
def test_no_profile(self):
"""
If the user hasn't created a profile, redirect them to the
profile creation view.
"""
request = Mock()
request.user = UserFactory.create()
with patch('oneanddone.users.mixins.redirect') as redirect:
eq_(self.view.dispatch(request), redirect.return_value)
redirect.assert_called_with('users.profile.create')
|
adini121/oneanddone
|
oneanddone/users/tests/test_mixins.py
|
Python
|
mpl-2.0
| 3,843
| 0.001041
|
import frappe
from frappe.utils import get_fullname
def execute():
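	# For each user_id linked to more than one Employee, keep the Employee whose
	# employee_name matches the user's full name and clear user_id on the rest.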
for user_id in frappe.db.sql_list("""select distinct user_id from `tabEmployee`
where ifnull(user_id, '')!=''
group by user_id having count(name) > 1"""):
fullname = get_fullname(user_id)
employee = frappe.db.get_value("Employee", {"employee_name": fullname, "user_id": user_id})
if employee:
frappe.db.sql("""update `tabEmployee` set user_id=null
where user_id=%s and name!=%s""", (user_id, employee))
|
gangadhar-kadam/hrerp
|
erpnext/patches/4_0/fix_employee_user_id.py
|
Python
|
agpl-3.0
| 491
| 0.028513
|
from django.apps import AppConfig
class VotesConfig(AppConfig):
name = 'meinberlin.apps.votes'
label = 'meinberlin_votes'
|
liqd/a4-meinberlin
|
meinberlin/apps/votes/apps.py
|
Python
|
agpl-3.0
| 132
| 0
|
# coding=utf-8
import matplotlib.pyplot as plt
import numpy as np
import pytest
from ..classes import Simulation, PeriodicTestGrid, NonperiodicTestGrid
from ..visualization.time_snapshots import FieldPlot, CurrentPlot
@pytest.fixture(params=(64, 128, 256, 512))
def _NG(request):
return request.param
@pytest.fixture(params=(1, 2 * np.pi, 10 * np.pi, 1000))
def _L(request):
return request.param
@pytest.fixture(params=(1, 2 * np.pi, 10 * np.pi, 1000))
def _test_charge_density(request):
return request.param
@pytest.fixture(params=(1, 2 * np.pi, 7.51))
def __t(request):
return request.param
def test_PoissonSolver(_NG, _L):
g = PeriodicTestGrid(1, _L, _NG)
charge_density = (2 * np.pi / _L) ** 2 * np.sin(2 * g.x * np.pi / _L)
field = np.zeros((_NG + 2, 3))
field[1:-1, 0] = -2 * np.pi / _L * np.cos(2 * np.pi * g.x / _L)
g.charge_density[:-1] = charge_density
g.init_solve()
def plots():
fig, axes = plt.subplots(2)
ax0, ax1 = axes
ax0.plot(g.x, charge_density)
ax0.set_title("Charge density")
ax1.set_title("Field")
ax1.plot(g.x, g.electric_field[1:-1], "r-", label="Fourier")
ax1.plot(g.x, field, "g-", label="Analytic")
for ax in axes:
ax.grid()
ax.legend()
plt.show()
return "test_PoissonSolver failed! calc/theory field ratio at 0: {}".format(g.electric_field[1] / field[0])
assert np.allclose(g.electric_field, field), plots()
# def test_PoissonSolver_complex(debug=DEBUG):
# L = 1
# N = 32 * 2**5
# epsilon_0 = 1
# x, dx = np.linspace(0, L, N, retstep=True, endpoint=False)
# anal_potential = lambda x: np.sin(x * 2 * np.pi) + 0.5 * \
# np.sin(x * 6 * np.pi) + 0.1 * np.sin(x * 20 * np.pi)
# anal_field = lambda x: -(2 * np.pi * np.cos(x * 2 * np.pi) + 3 * np.pi *
# np.cos(x * 6 * np.pi) + 20 * np.pi * 0.1 * np.cos(x * 20 * np.pi))
# charge_density_anal = lambda x: ((2 * np.pi)**2 * np.sin(x * 2 * np.pi) + 18 * np.pi**2 * np.sin(
# x * 6 * np.pi) + (20 * np.pi)**2 * 0.1 * np.sin(x * 20 * np.pi)) * epsilon_0
#
# NG = 32
# g = Frame(L, NG, epsilon_0)
# # indices_in_denser_grid = np.searchsorted(x, g.x)
# g.charge_density = charge_density_anal(g.x)
# energy_fourier = g.init_solver_fourier()
# energy_direct = 0.5 * (g.electric_field**2).sum() * g.dx
# print("dx", dx, "fourier", energy_fourier, "direct", energy_direct, energy_fourier / energy_direct)
#
# def plots():
# fig, xspace = plt.subplots()
# xspace.set_title(
# r"Solving the Poisson equation $\Delta \psi = \rho / \epsilon_0$ via Fourier transform")
# xspace.plot(g.x, g.charge_density, "ro--", label=r"$\rho$")
# xspace.plot(x, charge_density_anal(x), "r-", lw=6, alpha=0.5, label=r"$\rho_a$")
# xspace.plot(g.x, g.potential, "go--", label=r"$V$")
# xspace.plot(x, anal_potential(x), "g-", lw=6, alpha=0.5, label=r"$V_a$")
# xspace.plot(g.x, g.electric_field, "bo--", alpha=0.5, label=r"$E$")
# EplotAnal, = xspace.plot(x, anal_field(x), "b-", lw=6, alpha=0.5, label=r"$E_a$")
# xspace.set_xlim(0, L)
# xspace.set_xlabel("$x$")
# xspace.grid()
# xspace.legend(loc='best')
#
# fig2, fspace = plt.subplots()
# fspace.plot(g.k_plot, g.energy_per_mode, "bo--", label=r"electric energy $\rho_F V_F^\dagger$")
# fspace.set_xlabel("k")
# fspace.set_ylabel("mode energy")
# fspace.set_title("Fourier space")
# fspace.grid()
# fspace.legend(loc='best')
# plt.show()
# return "test_PoissonSolver_complex failed!"
#
# energy_correct = np.isclose(energy_fourier, energy_direct)
# field_correct = np.isclose(g.electric_field, anal_field(g.x)).all()
# potential_correct = np.isclose(g.potential, anal_potential(g.x)).all()
# assert field_correct and potential_correct and energy_correct, plots()
def test_PoissonSolver_energy_sine(_NG, ):
_L = 1
resolution_increase = _NG
N = _NG * resolution_increase
epsilon_0 = 1
x, dx = np.linspace(0, _L, N, retstep=True, endpoint=False)
anal_field = np.zeros((N, 3))
anal_field[:, 0] = -(2 * np.pi * np.cos(x * 2 * np.pi / _L))
charge_density_anal = ((2 * np.pi) ** 2 * np.sin(x * 2 * np.pi))
g = PeriodicTestGrid(1, _L, _NG, epsilon_0)
indices_in_denser_grid = np.searchsorted(x, g.x)
g.charge_density[:-1] = charge_density_anal[indices_in_denser_grid] # / resolution_increase
g.init_solve()
g.save_field_values(0)
g.postprocess()
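    # Compare the energy accumulated from the Fourier modes against a direct
    # real-space energy calculation on the grid.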
energy_fourier = g.grid_energy_history[0]
energy_direct = g.direct_energy_calculation()
print("dx", dx, "fourier", energy_fourier, "direct", energy_direct, energy_fourier / energy_direct)
def plots():
fig, xspace = plt.subplots()
xspace.set_title(
r"Solving the Poisson equation $\Delta \psi = \rho / \epsilon_0$ via Fourier transform")
xspace.plot(g.x, g.charge_density, "ro--", label=r"$\rho$")
xspace.plot(x, charge_density_anal, "r-", lw=6, alpha=0.5, label=r"$\rho_a$")
xspace.plot(g.x, g.electric_field, "bo--", alpha=0.5, label=r"$E$")
xspace.plot(x, anal_field, "b-", lw=6, alpha=0.5, label=r"$E_a$")
xspace.set_xlim(0, _L)
xspace.set_xlabel("$x$")
xspace.grid()
xspace.legend(loc='best')
fig2, fspace = plt.subplots()
fspace.plot(g.k_plot, g.energy_per_mode, "bo--", label=r"electric energy $\rho_F V_F^\dagger$")
fspace.set_xlabel("k")
fspace.set_ylabel("mode energy")
fspace.set_title("Fourier space")
fspace.grid()
fspace.legend(loc='best')
plt.show()
return "test_PoissonSolver_complex failed!"
energy_correct = np.allclose(energy_fourier, energy_direct)
assert energy_correct, plots()
field_correct = np.allclose(g.electric_field[1:-1, 0], anal_field[indices_in_denser_grid][:, 0])
assert field_correct, plots()
def test_PoissonSolver_sheets(_NG, _L, _test_charge_density=1):
epsilon_0 = 1
x, dx = np.linspace(0, _L, _NG, retstep=True, endpoint=False)
charge_density = np.zeros_like(x)
region1 = (_L * 1 / 8 < x) * (x < _L * 2 / 8)
region2 = (_L * 5 / 8 < x) * (x < _L * 6 / 8)
charge_density[region1] = _test_charge_density
charge_density[region2] = -_test_charge_density
g = PeriodicTestGrid(1, _L, _NG, epsilon_0)
g.charge_density[:-1] = charge_density
g.init_solve()
def plots():
        fig, axes = plt.subplots(2)
ax0, ax1 = axes
ax0.plot(x, charge_density)
ax0.set_title("Charge density")
ax1.set_title("Field")
ax1.plot(x, g.electric_field, "r-")
for ax in axes:
ax.grid()
ax.legend()
plt.show()
return "test_PoissonSolver_sheets failed!"
polynomial_coefficients = np.polyfit(x[region1], g.electric_field[1:-1, 0][region1], 1)
first_bump_right = np.isclose(
polynomial_coefficients[0], _test_charge_density, rtol=1e-2)
assert first_bump_right, plots()
polynomial_coefficients = np.polyfit(x[region2], g.electric_field[1:-1, 0][region2], 1)
second_bump_right = np.isclose(
polynomial_coefficients[0], -_test_charge_density, rtol=1e-2)
assert second_bump_right, plots()
def test_PoissonSolver_ramp(_NG, _L):
""" For a charge density rho = Ax + B
d2phi/dx2 = -rho/epsilon_0
set epsilon_0 to 1
    d2phi/dx2 = -(Ax + B)
    phi must be of form
    phi = -Ax^3/6 - Bx^2/2 + Cx + D"""
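    # Hence E = -dphi/dx is quadratic in x with leading coefficient A/2;
    # only that leading coefficient is checked against the fit below.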
a = 1
# noinspection PyArgumentEqualDefault
g = PeriodicTestGrid(1, _L, _NG, epsilon_0=1)
g.charge_density[:-1] = a * g.x
g.init_solve()
field = a * (g.x - _L / 2) ** 2 / 2
def plots():
fig, axes = plt.subplots(2)
ax0, ax1 = axes
ax0.plot(g.x, g.charge_density)
ax0.set_title("Charge density")
ax1.set_title("Field")
ax1.plot(g.x, g.electric_field, "r-")
ax1.plot(g.x, field, "g-")
for ax in axes:
ax.grid()
ax.legend()
plt.show()
return "test_PoissonSolver_ramp failed!"
polynomial_coefficients = np.polyfit(g.x, g.electric_field[1:-1, 0], 2)
assert np.isclose(polynomial_coefficients[0], a / 2, rtol=1e-2), (polynomial_coefficients[0], a / 2, plots())
def test_BunemanSolver(__t, _NG, _L, _test_charge_density):
g = NonperiodicTestGrid(__t, _L, _NG)
charge_index = _NG // 2
g.current_density_x[charge_index] = _test_charge_density
g.solve()
g.save_field_values(0)
S = Simulation(g)
pulled_field = g.electric_field[charge_index, 0]
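    # The test expects one field-update step of dE/dt = -J/epsilon_0 starting
    # from E = 0, i.e. E = -dt * J / epsilon_0 at the cell carrying the current.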
expected_field = - g.dt / g.epsilon_0 * _test_charge_density
def plot():
fig, (ax1, ax2) = plt.subplots(2)
CurrentPlot(S, ax1, 0).update(0)
FieldPlot(S, ax2, 0).update(0)
plt.show()
assert np.isclose(pulled_field, expected_field), plot()
def test_BunemanSolver_charge(__t, _NG, _L, _test_charge_density):
g = NonperiodicTestGrid(__t, _L, _NG)
v = 0.5
g.current_density_x[1:-2] = v * _test_charge_density
g.solve()
g.save_field_values(0)
S = Simulation(g).postprocess()
def plot():
fig, (ax1, ax2) = plt.subplots(2)
CurrentPlot(S, ax1, 0).update(0)
FieldPlot(S, ax2, 0).update(0)
plt.show()
assert np.allclose(g.electric_field[1:-1,0], -v * _test_charge_density * g.dt / g.epsilon_0), plot()
|
StanczakDominik/PythonPIC
|
pythonpic/tests/test_FieldSolver.py
|
Python
|
bsd-3-clause
| 9,551
| 0.003036
|
# -*- coding: utf-8 -*-
"""
Tests for the WMS Service Type.
"""
import unittest
from httmock import with_httmock
import mocks.warper
from aggregator.models import Service
class TestWarper(unittest.TestCase):
@with_httmock(mocks.warper.resource_get)
def test_create_wms_service(self):
# create the service
service = Service(
type='WARPER',
url='http://warper.example.com/warper/maps',
)
service.save()
# check layer number
self.assertEqual(service.layer_set.all().count(), 15)
# check layer 0 (public)
layer_0 = service.layer_set.all()[0]
self.assertEqual(layer_0.name, '29568')
self.assertEqual(layer_0.title, 'Plate 24: Map bounded by Myrtle Avenue')
self.assertTrue(layer_0.is_public)
self.assertEqual(layer_0.keywords.all().count(), 0)
self.assertEqual(layer_0.srs.all().count(), 3)
self.assertEqual(layer_0.check_set.all().count(), 1)
self.assertEqual(layer_0.layerdate_set.all()[0].date, '1855-01-01')
# a layer with no bbox must be stored with None coordinates
layer_no_bbox = service.layer_set.get(name='16239')
self.assertEqual(layer_no_bbox.bbox_x0, None)
self.assertEqual(layer_no_bbox.bbox_y0, None)
self.assertEqual(layer_no_bbox.bbox_x1, None)
self.assertEqual(layer_no_bbox.bbox_y1, None)
        # test that creating a service that already exists does not duplicate it
# create the service
def create_duplicated_service():
duplicated_service = Service(
type='WARPER',
url='http://warper.example.com/warper/maps',
)
duplicated_service.save()
self.assertRaises(Exception, create_duplicated_service)
if __name__ == '__main__':
unittest.main()
|
jmwenda/hypermap
|
hypermap/aggregator/tests/test_warper.py
|
Python
|
mit
| 1,877
| 0.001598
|
class Solution(object):
def asteroidCollision(self, asteroids):
"""
:type asteroids: List[int]
:rtype: List[int]
"""
ret = []
        for elem in asteroids:
            if elem > 0:
                # Right-moving asteroid: it cannot collide with anything
                # already on the stack, so just push it.
                ret.append(elem)
            else:
                # Left-moving asteroid: destroy smaller right-movers on top
                # of the stack until it explodes or survives.
                while ret:
                    if 0 < ret[-1] <= -elem:
                        temp = ret.pop()
                        if temp == -elem:
                            # Equal sizes: both asteroids explode.
                            break
                    else:
                        if ret[-1] < 0:
                            # Top of stack also moves left: no collision.
                            ret.append(elem)
                        break
                else:
                    # Stack emptied without a blocker: the left-mover survives.
                    ret.append(elem)
        return ret
print(Solution().asteroidCollision([5, 10, -5]))
print(Solution().asteroidCollision([8, -8]))
print(Solution().asteroidCollision([10, 2, -5]))
print(Solution().asteroidCollision([-2, -1, 1, 2]))
|
wufangjie/leetcode
|
735. Asteroid Collision.py
|
Python
|
gpl-3.0
| 882
| 0
|
# Copyright 2021 The TF-Coder Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Serialization of objects relevant to TF-Coder.
This module will be used to send information from the public Colab notebook to
Google Analytics, in string form. Using BigQuery we can extract the strings that
were sent, and then parse the strings back into the Python objects that they
represent. The information we want to log includes:
* Input/output objects. Usually these are multidimensional lists, Tensors, or
SparseTensors, but in principle these can be anything that value search
supports (e.g., primitives, dtypes, tuples of Tensors etc.).
* Constants. Usually these are Python primitives, but again they may be
anything value search supports (e.g., a shape tuple).
* Natural language description. This should be a string and may contain tricky
characters like Unicode or quotes.
* Settings for the TF-Coder tool. These may use standard Python collections,
i.e. lists/tuples/sets/dicts. This category of information should be treated
generally to be future-proof.
* Results of the TF-Coder tool. These would include timestamps and solution
expressions.
* Other metadata, e.g., session/problem IDs, and whether the data can be
released in a dataset.
"""
import ast
from typing import Any, List, Text
import numpy as np
import tensorflow as tf
# Constant strings for dict representations of objects.
_KIND_KEY = 'kind'
_DTYPE_KIND = 'DType'
_TENSOR_KIND = 'Tensor'
_SPARSE_TENSOR_KIND = 'SparseTensor'
_DICT_KIND = 'Dict'
def _object_to_literal(to_serialize: Any, container_stack: List[Any]) -> Any:
"""Turns a supported object into a Python literal."""
if isinstance(to_serialize, (int, float, bool, str, bytes, type(None))):
return to_serialize
elif isinstance(to_serialize, tf.DType):
dtype_string = repr(to_serialize)
assert dtype_string.startswith('tf.')
dtype_string = dtype_string[len('tf.'):]
return {_KIND_KEY: _DTYPE_KIND,
'dtype': dtype_string}
elif isinstance(to_serialize, tf.Tensor):
tensor_content = to_serialize.numpy()
# Sometimes tensor_content is a numpy type, and sometimes it's a normal
# Python type.
if type(tensor_content).__module__ == np.__name__:
tensor_content = tensor_content.tolist()
return {_KIND_KEY: _TENSOR_KIND,
'content': tensor_content,
'dtype': _object_to_literal(to_serialize.dtype, container_stack)}
elif isinstance(to_serialize, tf.SparseTensor):
return {_KIND_KEY: _SPARSE_TENSOR_KIND,
'indices': _object_to_literal(to_serialize.indices,
container_stack),
'values': _object_to_literal(to_serialize.values, container_stack),
'dense_shape': _object_to_literal(to_serialize.dense_shape,
container_stack)}
elif isinstance(to_serialize, dict):
if any(to_serialize is seen for seen in container_stack):
raise ValueError('Cycle detected in object dependencies.')
container_stack.append(to_serialize)
result = {_object_to_literal(key, container_stack):
_object_to_literal(value, container_stack)
for key, value in to_serialize.items()}
container_stack.pop()
return {_KIND_KEY: _DICT_KIND,
'dict': result}
elif isinstance(to_serialize, (list, tuple, set)):
if any(to_serialize is seen for seen in container_stack):
raise ValueError('Cycle detected in object dependencies.')
container_stack.append(to_serialize)
generator = (_object_to_literal(x, container_stack) for x in to_serialize)
container_type = type(to_serialize)
result = container_type(generator)
container_stack.pop()
return result
else:
raise TypeError('Cannot convert object {} with type {} to a literal.'
.format(to_serialize, type(to_serialize)))
def _literal_to_object(literal: Any) -> Any:
"""Turns a literal created by _object_to_literal back into the object."""
if isinstance(literal, (int, float, bool, str, bytes, type(None))):
return literal
elif isinstance(literal, dict):
# If the dict was not created by _object_to_literal, we may throw KeyError.
kind = literal[_KIND_KEY]
if kind == _DTYPE_KIND:
return getattr(tf, literal['dtype'])
elif kind == _TENSOR_KIND:
return tf.constant(literal['content'],
dtype=_literal_to_object(literal['dtype']))
elif kind == _SPARSE_TENSOR_KIND:
return tf.SparseTensor(
indices=_literal_to_object(literal['indices']),
values=_literal_to_object(literal['values']),
dense_shape=_literal_to_object(literal['dense_shape']))
elif kind == _DICT_KIND:
return {_literal_to_object(key): _literal_to_object(value)
for key, value in literal['dict'].items()}
else:
raise ValueError('Unsupported kind in dict: {}'.format(kind))
elif isinstance(literal, (list, tuple, set)):
generator = (_literal_to_object(x) for x in literal)
container_type = type(literal)
return container_type(generator)
else:
raise TypeError('Cannot convert literal {} with type {} to an object.'
.format(literal, type(literal)))
def serialize(to_serialize: Any) -> Text:
"""Serializes an object into a string.
Note: This does not work in Python 2 because its ast.literal_eval does not
support sets.
Args:
to_serialize: The object to serialize. This may be a Python literal (int,
float, boolean, string, or None), Tensor, SparseTensor, or
possibly-nested lists/tuples/sets/dicts of these.
Returns:
A string representation of the object.
"""
return repr(_object_to_literal(to_serialize, container_stack=[]))
def parse(serialized: Text) -> Any:
"""Unparses a string into an object (the inverse of serialize_object)."""
literal = ast.literal_eval(serialized)
return _literal_to_object(literal)
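# --- Usage sketch (illustrative; not part of the original module) ---
# A minimal round-trip, assuming TensorFlow eager execution so that
# Tensor.numpy() works inside _object_to_literal; the exact text of the
# serialized literal is an implementation detail.
#   original = {'rows': tf.constant([[1, 2], [3, 4]]), 'constants': [0, 1.5]}
#   text = serialize(original)   # plain string, safe to send to analytics
#   recovered = parse(text)      # rebuilds the Tensor and the nested list
#   assert recovered['constants'] == [0, 1.5]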
|
google-research/tensorflow-coder
|
tf_coder_colab_logging/serialization.py
|
Python
|
apache-2.0
| 6,543
| 0.004585
|
from datetime import date, timedelta
from django.db import models
from django.contrib.sites.models import Site
from django.core.validators import MinValueValidator
from clubdata.models import Club
def range_date_inclusive(start_date, end_date):
for n in range((end_date - start_date).days+1):
yield start_date + timedelta(n)
def num_days_in_month(d):
dmonth = d.month
if dmonth == 12:
return 31
else:
return (d.replace(month=dmonth+1, day=1) - timedelta(days=1)).day
def last_day_in_month(d):
dmonth = d.month
if dmonth == 12:
return d.replace(day=31)
else:
return d.replace(month=dmonth+1, day=1) - timedelta(days=1)
def decode_weekly_criteria(criteria):
c = criteria.split(",")
dow_possible = ('mo','tu','we','th','fr','sa','su')
dow = [False,False,False,False,False,False,False]
for x in c: dow[dow_possible.index(x)] = True
return dow
def decode_monthly_criteria(criteria):
c = criteria.split(",")
specificdays = []
daystocalculate = []
dow_possible = ('mo','tu','we','th','fr','sa','su')
for x in c:
if x.isdigit():
# Specific numbered day (same every month)
specificdays.append(int(x))
else:
# A code to represent a day. We'll convert from strings to integers for later.
if x == 'last':
# Last day of the month (must be calculated later)
daystocalculate.append( (99, -1) )
else:
y,z = x.split("-")
if y == 'last':
# Last DOW of the month (must be calculated later)
daystocalculate.append( (99, dow_possible.index(z)) )
else:
# Specified DOW of the month (must be calculated later)
daystocalculate.append( (int(y), dow_possible.index(z)) )
return specificdays,daystocalculate
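# --- Usage sketch (illustrative; not part of the original module) ---
# decode_weekly_criteria maps a day-of-week list onto seven booleans, and
# decode_monthly_criteria separates fixed day numbers from rules that must be
# resolved per month (99 marks "last"; the second value is the DOW index):
#   decode_weekly_criteria('mo,we,fr')
#   # -> [True, False, True, False, True, False, False]
#   decode_monthly_criteria('15,1-mo,last-fr,last')
#   # -> ([15], [(1, 0), (99, 4), (99, -1)])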
class RecurringEvent(models.Model):
DAILY = 100
WEEKLY = 200
MONTHLY = 300
rule_type_choices = (
(DAILY, 'Daily'),
(WEEKLY, 'Weekly'),
(MONTHLY, 'Monthly'),
)
id = models.AutoField(
primary_key=True)
# Range
starts_on = models.DateField('Starts on')
ends_on = models.DateField('Ends on')
# Rule
rule_type = models.IntegerField('Recurring rule',
choices=rule_type_choices,
default=WEEKLY)
repeat_each = models.IntegerField('Repeat each',
default=1,
validators=[MinValueValidator(1)])
criteria = models.CharField('Criteria',
max_length=200,
null=True, # Blank is stored as Null
blank=True) # Field is optional
class Meta:
verbose_name = 'Recurring Event'
verbose_name_plural = 'Recurring Events'
def __unicode__(self): #Python 3.3 is __str__
rt = self.rule_type
for t in self.rule_type_choices:
if t[0] == rt:
rt = t[1]
break
return "%s Event, %s to %s, Criteria=\"%s\"" % (rt, self.starts_on, self.ends_on, self.criteria)
def dates_per_rule_iter(self):
if self.rule_type == self.WEEKLY:
# criteria = Must be a comma-separated list of lowercase 2-letter abbreviations for the days
# of the week. Ex: mo,we,fr,su
# repeat_each = If this is 2, then every other week will be skipped. If it is 3,
# then two weeks will be skipped between each filled week. etc...
# Deconstruct the criteria
criteria = decode_weekly_criteria(self.criteria)
# Generate a list of dates that match
if self.repeat_each == 1:
# If repeat_each is 1, then our calculation is much simpler
for x in range_date_inclusive(self.starts_on, self.ends_on):
if criteria[x.weekday()]: yield x
else:
# Special handling because we're not doing every week
r = 2 # Set this to 2 so the first iteration will set it to 1
dow_begin = self.starts_on.weekday()
for x in range_date_inclusive(self.starts_on, self.ends_on):
wd = x.weekday()
if wd == dow_begin:
                        # It's the beginning of a new week. (Rather than assuming the user considers
                        # Monday to be the first day of the week, we use the DOW of the start of the range.)
if r == 1:
# Reset the counter
r = self.repeat_each
else:
# Decrease the counter
r -= 1
if r == 1:
# If counter is 1, then this week should be included
if criteria[wd]: yield x
elif self.rule_type == self.MONTHLY:
# criteria = Must be a comma-separated list of the following types of codes:
# * 1,2,3,4, etc specific days of the month
# * 1-mo, 3-fr, last-we, etc first Monday, third Friday, last Wednesday, etc.
# * last last day of the month
# repeat_each = If this is 2, then every other month will be skipped. If it is 3, then two
# months will be skipped between each filled month. etc...
# Deconstruct the criteria
specificdays,daystocalculate = decode_monthly_criteria(self.criteria)
# Generate a list of dates that match
calcdays = None
oneday = timedelta(days=1)
r = 2 # Set this to 2 so the first iteration will set it to 1
for x in range_date_inclusive(self.starts_on, self.ends_on):
xday = x.day
if (xday == 1) or (calcdays is None):
# It's the first day of the month (or first iteration of this loop)
if r == 1:
# Reset the counter
r = self.repeat_each
else:
# Decrease the counter
r -= 1
if r == 1: # Putting this within the above 'else' will malfunction if repeat_each is 1
                        # Since this month is included, we must turn those vague days into specific numbered days
                        # for the current month. (Each month is different, so they couldn't have been calculated earlier.)
calcdays = []
for y in daystocalculate:
if y[0] == 99:
if y[1] == -1:
# Calculate the last day of the month
calcdays.append(num_days_in_month(x))
else:
# Calculate the last DOW of the month
end_date = last_day_in_month(x)
for z in range(end_date.day):
d = end_date - timedelta(z)
if d.weekday() == y[1]:
calcdays.append(d.day)
break
else:
# Calculate the specified DOW of the month
start_date = date(x.year, x.month, 1)
found_count = 0
for z in range(num_days_in_month(start_date)):
d = start_date + timedelta(z)
if d.weekday() == y[1]:
found_count += 1
if found_count == y[0]:
calcdays.append(z+1)
break
print(calcdays)
# Check if this month is included (not a skipped month per the repeat_each rule)
if r == 1:
if (xday in specificdays) or (xday in calcdays):
# Assuming the daystocalculate have been calculated (above), simply check if the day is
# in one of the two lists
yield x
elif self.rule_type == self.DAILY:
# criteria = Not used
# repeat_each = If this is 2, then every other day will be skipped. If it is 3, only every
# third day will be chosen. etc...
# Generate a list of dates that match
if self.repeat_each == 1:
# If repeat_each is 1, then our calculation is much simpler
for x in range_date_inclusive(self.starts_on, self.ends_on):
yield x
else:
# Use the repeat value.
r = self.repeat_each # Include the first day of the range, and then start counting from there
for x in range_date_inclusive(self.starts_on, self.ends_on):
if r == self.repeat_each:
yield x
r = 1
else:
r += 1
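    # --- Usage sketch (illustrative; not part of the original model) ---
    # Assuming a configured Django environment, an unsaved instance is enough
    # to exercise the generator; e.g. a weekly rule on Mondays and Wednesdays:
    #   ev = RecurringEvent(starts_on=date(2017, 1, 2), ends_on=date(2017, 1, 11),
    #                       rule_type=RecurringEvent.WEEKLY, repeat_each=1,
    #                       criteria='mo,we')
    #   list(ev.dates_per_rule_iter())
    #   # -> [date(2017, 1, 2), date(2017, 1, 4), date(2017, 1, 9), date(2017, 1, 11)]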
class CustomEventManager(models.Manager):
# Custom manager to show only the items that either
# (a) belong to the current Club, or
# (b) belong to no Club
use_in_migrations = True
current_club_id = None
def _get_current_club_id(self):
if not self.current_club_id:
current_site = Site.objects.get_current()
if current_site.has_club:
self.current_club_id = current_site.club.id
return self.current_club_id
def get_queryset(self):
return super(CustomEventManager, self).get_queryset().filter(
models.Q(club=self._get_current_club_id()) | models.Q(club=None))
class Event(models.Model):
id = models.AutoField(
primary_key=True)
club = models.ForeignKey(Club, verbose_name='Specific to club',
help_text='Only the specified club will show the event on their calendar. If none, event will show on calendars of all clubs.',
null=True, # Blank is stored as Null
blank=True, # Field is optional
on_delete=models.SET_NULL) # Deleting a club will leave all associated events behind as global events
title = models.CharField('Title',
blank=True, # Field is optional
max_length=200)
start = models.DateTimeField('Start date/time')
duration = models.DurationField('Duration')
all_day = models.BooleanField('All day event?',
default=False)
recurring = models.ForeignKey(RecurringEvent, verbose_name='Belongs to recurring group',
null=True, # Blank is stored as Null
blank=True, # Field is optional
on_delete=models.SET_NULL) # Deleting an EventGroup will leave all linked events as isolated events
objects = CustomEventManager()
class Meta:
verbose_name = 'Event'
verbose_name_plural = 'Events'
def __unicode__(self): #Python 3.3 is __str__
return "%s %s" % (self.start, self.title)
|
InfoSec-CSUSB/club-websystem
|
src/events/models.py
|
Python
|
mit
| 10,430
| 0.020997
|
from django.core.checks.compatibility.django_1_10 import (
check_duplicate_middleware_settings,
)
from django.test import SimpleTestCase
from django.test.utils import override_settings
class CheckDuplicateMiddlwareSettingsTest(SimpleTestCase):
@override_settings(MIDDLEWARE=[], MIDDLEWARE_CLASSES=['django.middleware.common.CommonMiddleware'])
def test_duplicate_setting(self):
result = check_duplicate_middleware_settings(None)
self.assertEqual(result[0].id, '1_10.W001')
@override_settings(MIDDLEWARE=None)
def test_middleware_not_defined(self):
result = check_duplicate_middleware_settings(None)
self.assertEqual(len(result), 0)
|
kawamon/hue
|
desktop/core/ext-py/Django-1.11.29/tests/check_framework/tests_1_10_compatibility.py
|
Python
|
apache-2.0
| 688
| 0.001453
|
from django.core.management.base import BaseCommand
from migrate_dns.import_utils import do_import
class Command(BaseCommand):
args = ''
def handle(self, *args, **options):
do_import()
|
rtucker-mozilla/inventory
|
migrate_dns/management/commands/dns_migrate.py
|
Python
|
bsd-3-clause
| 204
| 0.004902
|
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from pants.binaries.binary_tool import BinaryToolBase
from pants.binaries.binary_util import (
BinaryToolFetcher,
BinaryToolUrlGenerator,
BinaryUtil,
HostPlatform,
)
from pants.option.scope import GLOBAL_SCOPE
from pants.testutil.test_base import TestBase
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_file_dump
class DefaultVersion(BinaryToolBase):
options_scope = "default-version-test"
name = "default_version_test_tool"
default_version = "XXX"
class AnotherTool(BinaryToolBase):
options_scope = "another-tool"
name = "another_tool"
default_version = "0.0.1"
class ReplacingLegacyOptionsTool(BinaryToolBase):
# TODO: check scope?
options_scope = "replacing-legacy-options-tool"
name = "replacing_legacy_options_tool"
default_version = "a2f4ab23a4c"
replaces_scope = "old_tool_scope"
replaces_name = "old_tool_version"
class BinaryUtilFakeUname(BinaryUtil):
def host_platform(self):
return HostPlatform("xxx", "yyy")
class CustomUrlGenerator(BinaryToolUrlGenerator):
_DIST_URL_FMT = "https://custom-url.example.org/files/custom_urls_tool-{version}-{system_id}"
_SYSTEM_ID = {
"xxx": "zzz",
}
def generate_urls(self, version, host_platform):
base = self._DIST_URL_FMT.format(
version=version, system_id=self._SYSTEM_ID[host_platform.os_name]
)
return [
base,
f"{base}-alternate",
]
class CustomUrls(BinaryToolBase):
options_scope = "custom-urls"
name = "custom_urls_tool"
default_version = "v2.1"
def get_external_url_generator(self):
return CustomUrlGenerator()
def _select_for_version(self, version):
binary_request = self.make_binary_request(version)
return BinaryUtilFakeUname.Factory._create_for_cls(BinaryUtilFakeUname).select(
binary_request
)
# TODO: these should have integration tests which use BinaryTool subclasses overriding archive_type.
class BinaryToolBaseTest(TestBase):
def setUp(self):
super().setUp()
self._context = self.context(
for_subsystems=[DefaultVersion, AnotherTool, ReplacingLegacyOptionsTool, CustomUrls],
options={
GLOBAL_SCOPE: {
"binaries_baseurls": ["https://binaries.example.org"],
"pants_bootstrapdir": str(temporary_dir()),
},
"another-tool": {"version": "0.0.2",},
"default-version-test.another-tool": {"version": "YYY",},
"custom-urls": {"version": "v2.3",},
"old_tool_scope": {"old_tool_version": "3",},
},
)
def test_base_options(self):
# TODO: using extra_version_option_kwargs!
default_version_tool = DefaultVersion.global_instance()
self.assertEqual(default_version_tool.version(), "XXX")
another_tool = AnotherTool.global_instance()
self.assertEqual(another_tool.version(), "0.0.2")
another_default_version_tool = DefaultVersion.scoped_instance(AnotherTool)
self.assertEqual(another_default_version_tool.version(), "YYY")
def test_replacing_legacy_options(self):
replacing_legacy_options_tool = ReplacingLegacyOptionsTool.global_instance()
self.assertEqual(replacing_legacy_options_tool.version(), "a2f4ab23a4c")
self.assertEqual(replacing_legacy_options_tool.version(self._context), "3")
def test_urls(self):
default_version_tool = DefaultVersion.global_instance()
self.assertIsNone(default_version_tool.get_external_url_generator())
with self.assertRaises(BinaryUtil.BinaryResolutionError) as cm:
default_version_tool.select()
err_msg = str(cm.exception)
self.assertIn(BinaryToolFetcher.BinaryNotFound.__name__, err_msg)
self.assertIn("Failed to fetch default_version_test_tool binary from any source:", err_msg)
self.assertIn(
"Failed to fetch binary from https://binaries.example.org/bin/default_version_test_tool/XXX/default_version_test_tool:",
err_msg,
)
custom_urls_tool = CustomUrls.global_instance()
self.assertEqual(custom_urls_tool.version(), "v2.3")
with self.assertRaises(BinaryUtil.BinaryResolutionError) as cm:
custom_urls_tool.select()
err_msg = str(cm.exception)
self.assertIn(BinaryToolFetcher.BinaryNotFound.__name__, err_msg)
self.assertIn("Failed to fetch custom_urls_tool binary from any source:", err_msg)
self.assertIn(
"Failed to fetch binary from https://custom-url.example.org/files/custom_urls_tool-v2.3-zzz:",
err_msg,
)
self.assertIn(
"Failed to fetch binary from https://custom-url.example.org/files/custom_urls_tool-v2.3-zzz-alternate:",
err_msg,
)
def test_hackily_snapshot(self):
with temporary_dir() as temp_dir:
safe_file_dump(
os.path.join(
temp_dir,
"bin",
DefaultVersion.name,
DefaultVersion.default_version,
DefaultVersion.name,
),
"content!",
)
context = self.context(
for_subsystems=[DefaultVersion],
options={GLOBAL_SCOPE: {"binaries_baseurls": [f"file:///{temp_dir}"],},},
)
self.maxDiff = None
default_version_tool = DefaultVersion.global_instance()
_, snapshot = default_version_tool.hackily_snapshot(context)
self.assertEqual(
"51a98706ab7458069aabe01856cb352ca97686e3edd3bf9ebd3205c2b38b2974",
snapshot.directory_digest.fingerprint,
)
|
wisechengyi/pants
|
tests/python/pants_test/binaries/test_binary_tool.py
|
Python
|
apache-2.0
| 6,038
| 0.003478
|
#! /usr/bin/env python3
with open('x.c', 'w') as f:
print('int main(void) { return 0; }', file=f)
with open('y', 'w'):
pass
|
pexip/meson
|
test cases/common/229 custom_target source/x.py
|
Python
|
apache-2.0
| 132
| 0
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Blocking and non-blocking HTTP client implementations using pycurl."""
import io
import collections
import logging
import pycurl
import threading
import time
from tornado import httputil
from tornado import ioloop
from tornado import stack_context
from tornado.escape import utf8
from tornado.httpclient import HTTPRequest, HTTPResponse, HTTPError, AsyncHTTPClient, main
class CurlAsyncHTTPClient(AsyncHTTPClient):
def initialize(self, io_loop=None, max_clients=10,
max_simultaneous_connections=None):
self.io_loop = io_loop
self._multi = pycurl.CurlMulti()
self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)
self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)
self._curls = [_curl_create(max_simultaneous_connections)
for i in range(max_clients)]
self._free_list = self._curls[:]
self._requests = collections.deque()
self._fds = {}
self._timeout = None
try:
self._socket_action = self._multi.socket_action
except AttributeError:
# socket_action is found in pycurl since 7.18.2 (it's been
# in libcurl longer than that but wasn't accessible to
# python).
logging.warning("socket_action method missing from pycurl; "
"falling back to socket_all. Upgrading "
"libcurl and pycurl will improve performance")
self._socket_action = \
lambda fd, action: self._multi.socket_all()
# libcurl has bugs that sometimes cause it to not report all
# relevant file descriptors and timeouts to TIMERFUNCTION/
# SOCKETFUNCTION. Mitigate the effects of such bugs by
# forcing a periodic scan of all active requests.
self._force_timeout_callback = ioloop.PeriodicCallback(
self._handle_force_timeout, 1000, io_loop=io_loop)
self._force_timeout_callback.start()
def close(self):
self._force_timeout_callback.stop()
for curl in self._curls:
curl.close()
self._multi.close()
self._closed = True
super(CurlAsyncHTTPClient, self).close()
def fetch(self, request, callback, **kwargs):
if not isinstance(request, HTTPRequest):
request = HTTPRequest(url=request, **kwargs)
self._requests.append((request, stack_context.wrap(callback)))
self._process_queue()
self._set_timeout(0)
def _handle_socket(self, event, fd, multi, data):
"""Called by libcurl when it wants to change the file descriptors
it cares about.
"""
event_map = {
pycurl.POLL_NONE: ioloop.IOLoop.NONE,
pycurl.POLL_IN: ioloop.IOLoop.READ,
pycurl.POLL_OUT: ioloop.IOLoop.WRITE,
pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE
}
if event == pycurl.POLL_REMOVE:
self.io_loop.remove_handler(fd)
del self._fds[fd]
else:
ioloop_event = event_map[event]
if fd not in self._fds:
self._fds[fd] = ioloop_event
self.io_loop.add_handler(fd, self._handle_events,
ioloop_event)
else:
self._fds[fd] = ioloop_event
self.io_loop.update_handler(fd, ioloop_event)
def _set_timeout(self, msecs):
"""Called by libcurl to schedule a timeout."""
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = self.io_loop.add_timeout(
time.time() + msecs/1000.0, self._handle_timeout)
def _handle_events(self, fd, events):
"""Called by IOLoop when there is activity on one of our
file descriptors.
"""
action = 0
if events & ioloop.IOLoop.READ: action |= pycurl.CSELECT_IN
if events & ioloop.IOLoop.WRITE: action |= pycurl.CSELECT_OUT
while True:
try:
ret, num_handles = self._socket_action(fd, action)
except pycurl.error as e:
ret = e.args[0]
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
self._finish_pending_requests()
def _handle_timeout(self):
"""Called by IOLoop when the requested timeout has passed."""
with stack_context.NullContext():
self._timeout = None
while True:
try:
ret, num_handles = self._socket_action(
pycurl.SOCKET_TIMEOUT, 0)
except pycurl.error as e:
ret = e.args[0]
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
self._finish_pending_requests()
# In theory, we shouldn't have to do this because curl will
# call _set_timeout whenever the timeout changes. However,
# sometimes after _handle_timeout we will need to reschedule
# immediately even though nothing has changed from curl's
# perspective. This is because when socket_action is
# called with SOCKET_TIMEOUT, libcurl decides internally which
# timeouts need to be processed by using a monotonic clock
# (where available) while tornado uses python's time.time()
# to decide when timeouts have occurred. When those clocks
# disagree on elapsed time (as they will whenever there is an
# NTP adjustment), tornado might call _handle_timeout before
# libcurl is ready. After each timeout, resync the scheduled
# timeout with libcurl's current state.
new_timeout = self._multi.timeout()
if new_timeout != -1:
self._set_timeout(new_timeout)
def _handle_force_timeout(self):
"""Called by IOLoop periodically to ask libcurl to process any
events it may have forgotten about.
"""
with stack_context.NullContext():
while True:
try:
ret, num_handles = self._multi.socket_all()
except pycurl.error as e:
ret = e.args[0]
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
self._finish_pending_requests()
def _finish_pending_requests(self):
"""Process any requests that were completed by the last
call to multi.socket_action.
"""
while True:
num_q, ok_list, err_list = self._multi.info_read()
for curl in ok_list:
self._finish(curl)
for curl, errnum, errmsg in err_list:
self._finish(curl, errnum, errmsg)
if num_q == 0:
break
self._process_queue()
def _process_queue(self):
with stack_context.NullContext():
while True:
started = 0
while self._free_list and self._requests:
started += 1
curl = self._free_list.pop()
(request, callback) = self._requests.popleft()
curl.info = {
"headers": httputil.HTTPHeaders(),
"buffer": io.StringIO(),
"request": request,
"callback": callback,
"curl_start_time": time.time(),
}
# Disable IPv6 to mitigate the effects of this bug
# on curl versions <= 7.21.0
# http://sourceforge.net/tracker/?func=detail&aid=3017819&group_id=976&atid=100976
if pycurl.version_info()[2] <= 0x71500: # 7.21.0
curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4)
_curl_setup_request(curl, request, curl.info["buffer"],
curl.info["headers"])
self._multi.add_handle(curl)
if not started:
break
def _finish(self, curl, curl_error=None, curl_message=None):
info = curl.info
curl.info = None
self._multi.remove_handle(curl)
self._free_list.append(curl)
buffer = info["buffer"]
if curl_error:
error = CurlError(curl_error, curl_message)
code = error.code
effective_url = None
buffer.close()
buffer = None
else:
error = None
code = curl.getinfo(pycurl.HTTP_CODE)
effective_url = curl.getinfo(pycurl.EFFECTIVE_URL)
buffer.seek(0)
# the various curl timings are documented at
# http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html
time_info = dict(
queue=info["curl_start_time"] - info["request"].start_time,
namelookup=curl.getinfo(pycurl.NAMELOOKUP_TIME),
connect=curl.getinfo(pycurl.CONNECT_TIME),
pretransfer=curl.getinfo(pycurl.PRETRANSFER_TIME),
starttransfer=curl.getinfo(pycurl.STARTTRANSFER_TIME),
total=curl.getinfo(pycurl.TOTAL_TIME),
redirect=curl.getinfo(pycurl.REDIRECT_TIME),
)
try:
info["callback"](HTTPResponse(
request=info["request"], code=code, headers=info["headers"],
buffer=buffer, effective_url=effective_url, error=error,
request_time=time.time() - info["curl_start_time"],
time_info=time_info))
except Exception:
self.handle_callback_exception(info["callback"])
def handle_callback_exception(self, callback):
self.io_loop.handle_callback_exception(callback)
class CurlError(HTTPError):
def __init__(self, errno, message):
HTTPError.__init__(self, 599, message)
self.errno = errno
def _curl_create(max_simultaneous_connections=None):
curl = pycurl.Curl()
if logging.getLogger().isEnabledFor(logging.DEBUG):
curl.setopt(pycurl.VERBOSE, 1)
curl.setopt(pycurl.DEBUGFUNCTION, _curl_debug)
curl.setopt(pycurl.MAXCONNECTS, max_simultaneous_connections or 5)
return curl
def _curl_setup_request(curl, request, buffer, headers):
curl.setopt(pycurl.URL, utf8(request.url))
# libcurl's magic "Expect: 100-continue" behavior causes delays
# with servers that don't support it (which include, among others,
# Google's OpenID endpoint). Additionally, this behavior has
# a bug in conjunction with the curl_multi_socket_action API
# (https://sourceforge.net/tracker/?func=detail&atid=100976&aid=3039744&group_id=976),
# which increases the delays. It's more trouble than it's worth,
# so just turn off the feature (yes, setting Expect: to an empty
# value is the official way to disable this)
if "Expect" not in request.headers:
request.headers["Expect"] = ""
# libcurl adds Pragma: no-cache by default; disable that too
if "Pragma" not in request.headers:
request.headers["Pragma"] = ""
# Request headers may be either a regular dict or HTTPHeaders object
if isinstance(request.headers, httputil.HTTPHeaders):
curl.setopt(pycurl.HTTPHEADER,
[utf8("%s: %s" % i) for i in request.headers.get_all()])
else:
curl.setopt(pycurl.HTTPHEADER,
[utf8("%s: %s" % i) for i in request.headers.items()])
if request.header_callback:
curl.setopt(pycurl.HEADERFUNCTION, request.header_callback)
else:
curl.setopt(pycurl.HEADERFUNCTION,
lambda line: _curl_header_callback(headers, line))
if request.streaming_callback:
curl.setopt(pycurl.WRITEFUNCTION, request.streaming_callback)
else:
curl.setopt(pycurl.WRITEFUNCTION, buffer.write)
curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects)
curl.setopt(pycurl.MAXREDIRS, request.max_redirects)
curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(1000 * request.connect_timeout))
curl.setopt(pycurl.TIMEOUT_MS, int(1000 * request.request_timeout))
if request.user_agent:
curl.setopt(pycurl.USERAGENT, utf8(request.user_agent))
else:
curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)")
if request.network_interface:
curl.setopt(pycurl.INTERFACE, request.network_interface)
if request.use_gzip:
curl.setopt(pycurl.ENCODING, "gzip,deflate")
else:
curl.setopt(pycurl.ENCODING, "none")
if request.proxy_host and request.proxy_port:
curl.setopt(pycurl.PROXY, request.proxy_host)
curl.setopt(pycurl.PROXYPORT, request.proxy_port)
if request.proxy_username:
credentials = '%s:%s' % (request.proxy_username,
request.proxy_password)
curl.setopt(pycurl.PROXYUSERPWD, credentials)
else:
curl.setopt(pycurl.PROXY, '')
if request.validate_cert:
curl.setopt(pycurl.SSL_VERIFYPEER, 1)
curl.setopt(pycurl.SSL_VERIFYHOST, 2)
else:
curl.setopt(pycurl.SSL_VERIFYPEER, 0)
curl.setopt(pycurl.SSL_VERIFYHOST, 0)
if request.ca_certs is not None:
curl.setopt(pycurl.CAINFO, request.ca_certs)
else:
# There is no way to restore pycurl.CAINFO to its default value
# (Using unsetopt makes it reject all certificates).
# I don't see any way to read the default value from python so it
# can be restored later. We'll have to just leave CAINFO untouched
# if no ca_certs file was specified, and require that if any
# request uses a custom ca_certs file, they all must.
pass
if request.allow_ipv6 is False:
# Curl behaves reasonably when DNS resolution gives an ipv6 address
# that we can't reach, so allow ipv6 unless the user asks to disable.
# (but see version check in _process_queue above)
curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4)
# Set the request method through curl's irritating interface which makes
# up names for almost every single method
curl_options = {
"GET": pycurl.HTTPGET,
"POST": pycurl.POST,
"PUT": pycurl.UPLOAD,
"HEAD": pycurl.NOBODY,
}
custom_methods = set(["DELETE"])
for o in list(curl_options.values()):
curl.setopt(o, False)
if request.method in curl_options:
curl.unsetopt(pycurl.CUSTOMREQUEST)
curl.setopt(curl_options[request.method], True)
elif request.allow_nonstandard_methods or request.method in custom_methods:
curl.setopt(pycurl.CUSTOMREQUEST, request.method)
else:
raise KeyError('unknown method ' + request.method)
# Handle curl's cryptic options for every individual HTTP method
if request.method in ("POST", "PUT"):
        request_buffer = io.BytesIO(utf8(request.body))  # utf8() returns bytes
curl.setopt(pycurl.READFUNCTION, request_buffer.read)
if request.method == "POST":
def ioctl(cmd):
if cmd == curl.IOCMD_RESTARTREAD:
request_buffer.seek(0)
curl.setopt(pycurl.IOCTLFUNCTION, ioctl)
curl.setopt(pycurl.POSTFIELDSIZE, len(request.body))
else:
curl.setopt(pycurl.INFILESIZE, len(request.body))
if request.auth_username is not None:
userpwd = "%s:%s" % (request.auth_username, request.auth_password or '')
curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
curl.setopt(pycurl.USERPWD, utf8(userpwd))
logging.debug("%s %s (username: %r)", request.method, request.url,
request.auth_username)
else:
curl.unsetopt(pycurl.USERPWD)
logging.debug("%s %s", request.method, request.url)
if request.client_key is not None or request.client_cert is not None:
raise ValueError("Client certificate not supported with curl_httpclient")
if threading.activeCount() > 1:
# libcurl/pycurl is not thread-safe by default. When multiple threads
# are used, signals should be disabled. This has the side effect
# of disabling DNS timeouts in some environments (when libcurl is
# not linked against ares), so we don't do it when there is only one
# thread. Applications that use many short-lived threads may need
# to set NOSIGNAL manually in a prepare_curl_callback since
# there may not be any other threads running at the time we call
# threading.activeCount.
curl.setopt(pycurl.NOSIGNAL, 1)
if request.prepare_curl_callback is not None:
request.prepare_curl_callback(curl)
def _curl_header_callback(headers, header_line):
# header_line as returned by curl includes the end-of-line characters.
header_line = header_line.strip()
if header_line.startswith("HTTP/"):
headers.clear()
return
if not header_line:
return
headers.parse_line(header_line)
def _curl_debug(debug_type, debug_msg):
debug_types = ('I', '<', '>', '<', '>')
if debug_type == 0:
logging.debug('%s', debug_msg.strip())
elif debug_type in (1, 2):
for line in debug_msg.splitlines():
logging.debug('%s %s', debug_types[debug_type], line)
elif debug_type == 4:
logging.debug('%s %r', debug_types[debug_type], debug_msg)
if __name__ == "__main__":
AsyncHTTPClient.configure(CurlAsyncHTTPClient)
main()
|
e1ven/Waymoot
|
libs/tornado-2.2/build/lib/tornado/curl_httpclient.py
|
Python
|
mit
| 18,106
| 0.000663
|
from canvas import MappingCanvas
from viewport import MappingViewport
try:
from geojson_overlay import GeoJSONOverlay
except ImportError:
# No geojson
pass
# Tile managers
from mbtile_manager import MBTileManager
from http_tile_manager import HTTPTileManager
|
nmichaud/enable-mapping
|
mapping/enable/api.py
|
Python
|
bsd-3-clause
| 273
| 0
|
"""Ensures that account.identifier is unique.
Revision ID: ea2739ecd874
Revises: 5bd631a1b748
Create Date: 2017-09-29 09:16:09.436339
"""
# revision identifiers, used by Alembic.
revision = 'ea2739ecd874'
down_revision = '5bd631a1b748'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint(None, 'account', ['identifier'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'account', type_='unique')
# ### end Alembic commands ###
|
stackArmor/security_monkey
|
migrations/versions/ea2739ecd874_.py
|
Python
|
apache-2.0
| 646
| 0.003096
|
from model.flyweight import Flyweight
from model.static.database import database
class Service(Flyweight):
def __init__(self,service_id):
#prevents reinitializing
if "_inited" in self.__dict__:
return
self._inited = None
#prevents reinitializing
self.service_id = service_id
cursor = database.get_cursor(
"select * from staServices where serviceID={};".format(
self.service_id))
row = cursor.fetchone()
self.service_name = row["serviceName"]
self.description = row["description"]
cursor.close()
|
Iconik/eve-suite
|
src/model/static/sta/services.py
|
Python
|
gpl-3.0
| 624
| 0.00641
|
import numpy as np
def test_prepare_abi_connectivity_maps():
from samri.fetch.local import prepare_abi_connectivity_maps
prepare_abi_connectivity_maps('Ventral_tegmental_area',
invert_lr_experiments=[
"127651139",
"127796728",
"127798146",
"127867804",
"156314762",
"160539283",
"160540751",
"165975096",
"166054222",
"171021829",
"175736945",
"278178382",
"292958638",
"301062306",
"304337288",
],
)
def test_prepare_feature_map():
from samri.fetch.local import prepare_feature_map
prepare_feature_map('/usr/share/ABI-connectivity-data/Ventral_tegmental_area-127651139/',
invert_lr=True,
save_as='/var/tmp/samri_testing/pytest/vta_127651139.nii.gz',
)
def test_summary_atlas():
from samri.fetch.local import summary_atlas
mapping='/usr/share/mouse-brain-templates/dsurqe_labels.csv'
atlas='/usr/share/mouse-brain-templates/dsurqec_40micron_labels.nii'
summary={
1:{
'structure':'Hippocampus',
'summarize':['CA'],
'laterality':'right',
},
2:{
'structure':'Hippocampus',
'summarize':['CA'],
'laterality':'left',
},
3:{
'structure':'Cortex',
'summarize':['cortex'],
'laterality':'right',
},
4:{
'structure':'Cortex',
'summarize':['cortex'],
'laterality':'left',
},
}
new_atlas, new_mapping = summary_atlas(atlas,mapping,
summary=summary,
)
new_atlas_data = new_atlas.get_data()
output_labels = np.unique(new_atlas_data).tolist()
target_labels = [0,]
target_labels.extend([i for i in summary.keys()])
assert output_labels == target_labels
def test_roi_from_atlaslabel():
from samri.fetch.local import roi_from_atlaslabel
mapping='/usr/share/mouse-brain-templates/dsurqe_labels.csv'
atlas='/usr/share/mouse-brain-templates/dsurqec_40micron_labels.nii'
my_roi = roi_from_atlaslabel(atlas,
mapping=mapping,
label_names=['cortex'],
)
roi_data = my_roi.get_data()
output_labels = np.unique(roi_data).tolist()
assert output_labels == [0, 1]
my_roi = roi_from_atlaslabel(atlas,
mapping=mapping,
label_names=['cortex'],
output_label=3,
)
roi_data = my_roi.get_data()
output_labels = np.unique(roi_data).tolist()
assert output_labels == [0, 3]
|
IBT-FMI/SAMRI
|
samri/fetch/test/test_local.py
|
Python
|
gpl-3.0
| 2,202
| 0.052225
|
#!/usr/bin/env python
#
# Copyright (c) 2016 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Liu, Yun <yunx.liu@intel.com>
import unittest
import os
import comm
class TestCrosswalkApptoolsFunctions(unittest.TestCase):
def test_setting_value(self):
comm.setUp()
os.chdir(comm.XwalkPath)
comm.clear("org.xwalk.test")
os.mkdir("org.xwalk.test")
os.chdir('org.xwalk.test')
cmd = comm.HOST_PREFIX + comm.PackTools + \
"crosswalk-pkg --platforms=android --android=" + comm.ANDROID_MODE + " --crosswalk=" + comm.crosswalkzip + " ../../testapp/manifest_xwalk_target_platforms/windows_platform/"
(return_code, output) = comm.getstatusoutput(cmd)
apks = os.listdir(os.getcwd())
apkLength = 0
if comm.MODE != " --android-shared":
for i in range(len(apks)):
if apks[i].endswith(".apk") and "x86" in apks[i]:
if comm.BIT == "64":
self.assertIn("64", apks[i])
apkLength = apkLength + 1
if apks[i].endswith(".apk") and "arm" in apks[i]:
if comm.BIT == "64":
self.assertIn("64", apks[i])
apkLength = apkLength + 1
self.assertEquals(apkLength, 2)
else:
for i in range(len(apks)):
if apks[i].endswith(".apk") and "shared" in apks[i]:
apkLength = apkLength + 1
appVersion = apks[i].split('-')[1]
self.assertEquals(apkLength, 1)
comm.run(self)
comm.clear("org.xwalk.test")
self.assertEquals(return_code, 0)
self.assertIn("Loading 'android' platform backend", output[0])
self.assertNotIn("Loading 'windows' platform backend", output[0])
def test_without_platforms(self):
comm.setUp()
os.chdir(comm.XwalkPath)
comm.clear("org.xwalk.test")
os.mkdir("org.xwalk.test")
os.chdir('org.xwalk.test')
cmd = comm.HOST_PREFIX + comm.PackTools + \
"crosswalk-pkg --android=" + comm.ANDROID_MODE + " --crosswalk=" + comm.crosswalkzip + " ../../testapp/create_package_basic/"
(return_code, output) = comm.getstatusoutput(cmd)
apks = os.listdir(os.getcwd())
apkLength = 0
if comm.MODE != " --android-shared":
for i in range(len(apks)):
if apks[i].endswith(".apk") and "x86" in apks[i]:
if comm.BIT == "64":
self.assertIn("64", apks[i])
apkLength = apkLength + 1
if apks[i].endswith(".apk") and "arm" in apks[i]:
if comm.BIT == "64":
self.assertIn("64", apks[i])
apkLength = apkLength + 1
self.assertEquals(apkLength, 2)
else:
for i in range(len(apks)):
if apks[i].endswith(".apk") and "shared" in apks[i]:
apkLength = apkLength + 1
appVersion = apks[i].split('-')[1]
self.assertEquals(apkLength, 1)
comm.run(self)
comm.clear("org.xwalk.test")
self.assertEquals(return_code, 0)
self.assertIn("Loading 'android' platform backend", output[0])
def test_with_target_platforms(self):
comm.setUp()
os.chdir(comm.XwalkPath)
comm.clear("org.xwalk.test")
os.mkdir("org.xwalk.test")
os.chdir('org.xwalk.test')
cmd = comm.HOST_PREFIX + comm.PackTools + \
"crosswalk-pkg --android=" + comm.ANDROID_MODE + " --crosswalk=" + comm.crosswalkzip + " ../../testapp/manifest_xwalk_target_platforms/android_platform/"
(return_code, output) = comm.getstatusoutput(cmd)
apks = os.listdir(os.getcwd())
apkLength = 0
if comm.MODE != " --android-shared":
for i in range(len(apks)):
if apks[i].endswith(".apk") and "x86" in apks[i]:
if comm.BIT == "64":
self.assertIn("64", apks[i])
apkLength = apkLength + 1
if apks[i].endswith(".apk") and "arm" in apks[i]:
if comm.BIT == "64":
self.assertIn("64", apks[i])
apkLength = apkLength + 1
self.assertEquals(apkLength, 2)
else:
for i in range(len(apks)):
if apks[i].endswith(".apk") and "shared" in apks[i]:
apkLength = apkLength + 1
appVersion = apks[i].split('-')[1]
self.assertEquals(apkLength, 1)
comm.run(self)
comm.clear("org.xwalk.test")
self.assertEquals(return_code, 0)
self.assertIn("Loading 'android' platform backend", output[0])
if __name__ == '__main__':
unittest.main()
|
crosswalk-project/crosswalk-test-suite
|
apptools/apptools-android-tests/apptools/manifest_xwalk_target_platforms.py
|
Python
|
bsd-3-clause
| 6,310
| 0.000792
|
from nodetraq.tests import *
class TestPoolsController(TestController):
def test_index(self):
response = self.app.get(url(controller='pools', action='index'))
# Test response...
|
seryl/Nodetraq
|
nodetraq/tests/functional/test_pools.py
|
Python
|
mit
| 200
| 0.005
|
# this example shows how to append new calculated results to an already
# existing cmr file, illustrated for calculation of PBE energy on LDA density
import os
import cmr
# set True in order to use cmr in parallel jobs!
cmr.set_ase_parallel(enable=True)
from ase.structure import molecule
from ase.io import read, write
from ase.parallel import barrier, rank
from gpaw import GPAW, restart
from gpaw.test import equal
# define the project in order to find it in the database!
project_id = 'modify cmr file after gpw restart'
formula = 'H2'
vacuum = 2.0
xc = 'LDA'
mode = 'lcao'
h = 0.20
cmr_params = {
'db_keywords': [project_id],
# add project_id also as a field to support search across projects
'project_id': project_id,
# user's tags: xc tag will be set later for illustration purpose!
'formula': formula,
'vacuum': vacuum,
'mode': mode,
'h': h,
}
cmrfile = formula + '.cmr'
system1 = molecule(formula)
system1.center(vacuum=vacuum)
# first calculation: LDA lcao
calc = GPAW(mode=mode, xc=xc, h=h, txt=None)
system1.set_calculator(calc)
e = system1.get_potential_energy()
calc.write(formula)
# read gpw file
system2, calc2 = restart(formula, txt=None)
# write the information 'as in' gpw file into db file
# (called *db to avoid conflict with the *cmr file below)
if 1: # not used in this example
calc2.write(formula + '.db', cmr_params=cmr_params)
# write the information 'as in' corresponding trajectory file into cmr file
write(cmrfile, system2, cmr_params=cmr_params)
# add the xc tag to the cmrfile
assert os.path.exists(cmrfile)
data = cmr.read(cmrfile)
data.set_user_variable('xc', xc)
data.write(cmrfile)
# perform PBE calculation on LDA density
ediff = calc2.get_xc_difference('PBE')
# add new results to the cmrfile
assert os.path.exists(cmrfile)
data = cmr.read(cmrfile)
data.set_user_variable('PBE', data['ase_potential_energy'] + ediff)
data.write(cmrfile)
# analyse the results with CMR
# cmr readers work only in serial!
from cmr.ui import DirectoryReader
if rank == 0:
reader = DirectoryReader(directory='.', ext='.cmr')
# read all compounds in the project with lcao
all = reader.find(name_value_list=[('mode', 'lcao')],
keyword_list=[project_id])
results = all.get('formula', formula)
print results['formula'], results['xc'], results['ase_potential_energy']
# column_length=0 aligns data in the table (-1 : data unaligned is default)
all.print_table(column_length=0,
columns=['formula', 'xc', 'h', 'ase_potential_energy', 'PBE'])
if rank == 0:
equal(results['PBE'], e + ediff, 1e-6)
if rank == 0:
for file in [formula + '.gpw', formula + '.db', cmrfile]:
if os.path.exists(file): os.unlink(file)
|
ajylee/gpaw-rtxs
|
gpaw/test/cmr_append.py
|
Python
|
gpl-3.0
| 2,763
| 0.003257
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-08-22 16:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
def forwards_func(apps, schema_editor):
# We get the model from the versioned app registry;
# if we directly import it, it'll be the wrong version
m_model = apps.get_model("guidedmodules", "Module")
mq_model = apps.get_model("guidedmodules", "ModuleQuestion")
db_alias = schema_editor.connection.alias
for obj in mq_model.objects.all():
if obj.spec.get("module-id"):
mq_model.objects\
.filter(id=obj.id)\
.update(answer_type_module=m_model.objects.get(id=obj.spec["module-id"]))
class Migration(migrations.Migration):
dependencies = [
('guidedmodules', '0010_auto_20160809_1500'),
]
operations = [
migrations.AddField(
model_name='modulequestion',
name='answer_type_module',
field=models.ForeignKey(
blank=True,
help_text=(
'For module and module-set typed questions, this is the Module that'
' Tasks that answer this question must be for.'
),
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name='is_type_of_answer_to',
to='guidedmodules.Module',
),
),
migrations.RunPython(forwards_func, migrations.RunPython.noop),
]
|
GovReady/govready-q
|
guidedmodules/migrations/0011_modulequestion_answer_type_module.py
|
Python
|
gpl-3.0
| 1,551
| 0.002579
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.authentication import (
BasicAuthentication,
BasicTokenAuthentication,
OAuthTokenAuthentication)
from msrestazure.azure_active_directory import (
InteractiveCredentials,
ServicePrincipalCredentials,
UserPassCredentials)
|
balajikris/autorest
|
src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/SubscriptionIdApiVersion/microsoftazuretesturl/credentials.py
|
Python
|
mit
| 731
| 0
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 ~ 2012 Deepin, Inc.
# 2011 ~ 2012 Hou Shaohui
#
# Author: Hou Shaohui <houshao55@gmail.com>
# Maintainer: Hou Shaohui <houshao55@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gtk
import gobject
from dtk.ui.menu import Menu
from dtk.ui.draw import draw_pixbuf
from dtk.ui.label import Label
from dtk.ui.utils import propagate_expose
from dtk.ui.constant import BUTTON_PRESS, BUTTON_NORMAL, BUTTON_HOVER
import dtk.ui.tooltip as Tooltip
from widget.skin import app_theme
from nls import _
class ComboItem(gtk.Button):
def __init__(self, bg_image_group, icon_group, index, set_index, get_index):
gtk.Button.__init__(self)
# Init.
self.index = index
self.set_index = set_index
self.get_index = get_index
self.icon_group = icon_group
self.bg_image_group = bg_image_group
self.resize_button()
# connect
self.connect("clicked", self.update_button_index)
self.connect("expose-event", self.expose_button_cb)
def expose_button_cb(self, widget, event):
# Init.
rect = widget.allocation
bg_normal_dpixbuf, bg_hover_dpixbuf, bg_press_dpixbuf = self.bg_image_group
fg_normal_dpixbuf, fg_hover_dpixbuf, fg_press_dpixbuf = self.icon_group
select_index = self.get_index()
bg_image = bg_normal_dpixbuf.get_pixbuf()
fg_image = fg_normal_dpixbuf.get_pixbuf()
if widget.state == gtk.STATE_NORMAL:
if select_index == self.index:
select_status = BUTTON_PRESS
else:
select_status = BUTTON_NORMAL
elif widget.state == gtk.STATE_PRELIGHT:
if select_index == self.index:
select_status = BUTTON_PRESS
else:
select_status = BUTTON_HOVER
elif widget.state == gtk.STATE_ACTIVE:
select_status = BUTTON_PRESS
if select_status == BUTTON_NORMAL:
bg_image = bg_normal_dpixbuf.get_pixbuf()
fg_image = fg_normal_dpixbuf.get_pixbuf()
elif select_status == BUTTON_HOVER:
bg_image = bg_hover_dpixbuf.get_pixbuf()
fg_image = fg_hover_dpixbuf.get_pixbuf()
elif select_status == BUTTON_PRESS:
bg_image = bg_press_dpixbuf.get_pixbuf()
fg_image = fg_press_dpixbuf.get_pixbuf()
image_width = bg_image.get_width()
image_height = bg_image.get_height()
fg_rect_x = rect.x + (image_width - fg_image.get_width()) / 2
fg_rect_y = rect.y + (image_height - fg_image.get_height()) / 2
cr = widget.window.cairo_create()
draw_pixbuf(cr, bg_image, rect.x, rect.y)
draw_pixbuf(cr, fg_image, fg_rect_x, fg_rect_y)
propagate_expose(widget, event)
return True
def resize_button(self):
normal_dpixbuf = self.bg_image_group[0]
request_width = normal_dpixbuf.get_pixbuf().get_width()
request_height = normal_dpixbuf.get_pixbuf().get_height()
self.set_size_request(request_width, request_height)
def update_icon_group(self, new_group):
self.icon_group = new_group
def update_button_index(self, widget):
self.set_index(self.index)
class ComboButton(gtk.Button):
def __init__(self, bg_image_group, icon_group):
gtk.Button.__init__(self)
# Init.
self.icon_group = icon_group
self.bg_image_group = bg_image_group
self.resize_button()
# connect
self.connect("expose-event", self.expose_button_cb)
def expose_button_cb(self, widget, event):
# Init.
rect = widget.allocation
bg_normal_dpixbuf, bg_hover_dpixbuf, bg_press_dpixbuf = self.bg_image_group
fg_normal_dpixbuf, fg_hover_dpixbuf, fg_press_dpixbuf = self.icon_group
if widget.state == gtk.STATE_NORMAL:
bg_image = bg_normal_dpixbuf.get_pixbuf()
fg_image = fg_normal_dpixbuf.get_pixbuf()
elif widget.state == gtk.STATE_PRELIGHT:
bg_image = bg_hover_dpixbuf.get_pixbuf()
fg_image = fg_hover_dpixbuf.get_pixbuf()
elif widget.state == gtk.STATE_ACTIVE:
bg_image = bg_press_dpixbuf.get_pixbuf()
fg_image = fg_press_dpixbuf.get_pixbuf()
image_width = bg_image.get_width()
image_height = bg_image.get_height()
fg_rect_x = rect.x + (image_width - fg_image.get_width()) / 2
fg_rect_y = rect.y + (image_height - fg_image.get_height()) / 2
cr = widget.window.cairo_create()
draw_pixbuf(cr, bg_image, rect.x, rect.y)
draw_pixbuf(cr, fg_image, fg_rect_x, fg_rect_y)
propagate_expose(widget, event)
return True
def resize_button(self):
normal_dpixbuf = self.bg_image_group[0]
request_width = normal_dpixbuf.get_pixbuf().get_width()
request_height = normal_dpixbuf.get_pixbuf().get_height()
self.set_size_request(request_width, request_height)
def update_icon_group(self, new_group):
self.icon_group = new_group
class ComboMenuButton(gtk.HBox):
__gsignals__ = {
"list-actived" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"combo-actived" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING,))
}
def __init__(self, init_index=0):
super(ComboMenuButton, self).__init__()
self.current_index = init_index
self.current_status = "artist"
self.set_spacing(0)
self.msg_content = _("By artist")
self.list_button = ComboItem(
(app_theme.get_pixbuf("combo/left_normal.png"),
app_theme.get_pixbuf("combo/left_hover.png"),
app_theme.get_pixbuf("combo/left_press.png")),
(app_theme.get_pixbuf("combo/list_normal.png"),
app_theme.get_pixbuf("combo/list_normal.png"),
app_theme.get_pixbuf("combo/list_press.png")
), 0, self.set_index, self.get_index)
Tooltip.text(self.list_button, _("List view"))
# draw left_button.
self.left_button = gtk.Button()
self.left_button = ComboItem(
(app_theme.get_pixbuf("combo/left_normal.png"),
app_theme.get_pixbuf("combo/left_hover.png"),
app_theme.get_pixbuf("combo/left_press.png")),
(app_theme.get_pixbuf("combo/artist_normal.png"),
app_theme.get_pixbuf("combo/artist_normal.png"),
app_theme.get_pixbuf("combo/artist_press.png")
), 1, self.set_index, self.get_index)
Tooltip.custom(self.left_button, self.get_msg_label).always_update(self.left_button, True)
# draw right_button.
self.right_button = ComboButton(
(app_theme.get_pixbuf("combo/right_normal.png"),
app_theme.get_pixbuf("combo/right_hover.png"),
app_theme.get_pixbuf("combo/right_hover.png")),
(app_theme.get_pixbuf("combo/triangle_normal.png"),
app_theme.get_pixbuf("combo/triangle_normal.png"),
app_theme.get_pixbuf("combo/triangle_press.png")
))
# signals.
self.left_button.connect("clicked", lambda w: self.emit_combo_signal())
self.right_button.connect("button-press-event", self.show_right_menu)
self.list_button.connect("clicked", lambda w: self.emit_list_signal())
# pack
combo_box = gtk.HBox()
combo_box.pack_start(self.left_button)
combo_box.pack_start(self.right_button)
self.pack_start(self.list_button)
self.pack_start(combo_box)
def show_right_menu(self, widget, event):
menu_items = [
(self.get_menu_pixbuf_group("artist"), _("by artist"), self.update_widget_icon, "artist", _("by artist")),
(self.get_menu_pixbuf_group("genre"), _("by genre"), self.update_widget_icon, "genre", _("by genre")),
(self.get_menu_pixbuf_group("album"), _("by album"), self.update_widget_icon, "album", _("by album")),
]
Menu(menu_items, True).show((int(event.x_root) - 10, int(event.y_root)))
def get_menu_pixbuf_group(self, name):
return (app_theme.get_pixbuf("combo/%s_press.png" % name), app_theme.get_pixbuf("combo/%s_hover.png" % name))
def update_widget_icon(self, name, tip_msg):
self.left_button.update_icon_group((
app_theme.get_pixbuf("combo/%s_normal.png" % name),
app_theme.get_pixbuf("combo/%s_normal.png" % name),
app_theme.get_pixbuf("combo/%s_press.png" % name)
))
self.set_index(1)
self.current_status = name
self.msg_content = tip_msg
self.emit_combo_signal()
def get_msg_label(self):
return Label(self.msg_content)
def set_index(self, index):
self.current_index = index
self.queue_draw()
def get_index(self):
return self.current_index
def emit_combo_signal(self):
self.emit("combo-actived", self.current_status)
def emit_list_signal(self):
self.emit("list-actived")
def get_combo_active(self):
return self.current_index == 1
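# Hypothetical usage sketch (not part of the original module): it assumes the
# module-level imports used above (gtk, app_theme, Tooltip, Menu, Label) are in
# place; the handler names and the container argument are illustrative only.
def _example_combo_menu_button_usage(container):
    def on_list_actived(widget):
        # React to the "list-actived" signal: switch to the plain list view.
        pass
    def on_combo_actived(widget, status):
        # React to "combo-actived": status is "artist", "genre" or "album",
        # as emitted by emit_combo_signal() above.
        pass
    combo_button = ComboMenuButton()
    combo_button.connect("list-actived", on_list_actived)
    combo_button.connect("combo-actived", on_combo_actived)
    container.pack_start(combo_button, False, False)
    return combo_button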
|
hillwoodroc/deepin-music-player
|
src/widget/combo.py
|
Python
|
gpl-3.0
| 10,334
| 0.009386
|
from builtins import range
import sys
import unittest
import re
import os.path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from Exscript import Account
from Exscript.account import AccountPool
from Exscript.util.file import get_accounts_from_file
class AccountPoolTest(unittest.TestCase):
CORRELATE = AccountPool
def setUp(self):
self.user1 = 'testuser1'
self.password1 = 'test1'
self.account1 = Account(self.user1, self.password1)
self.user2 = 'testuser2'
self.password2 = 'test2'
self.account2 = Account(self.user2, self.password2)
self.accm = AccountPool()
def testConstructor(self):
accm = AccountPool()
self.assertEqual(accm.n_accounts(), 0)
accm = AccountPool([self.account1, self.account2])
self.assertEqual(accm.n_accounts(), 2)
def testAddAccount(self):
self.assertEqual(self.accm.n_accounts(), 0)
self.accm.add_account(self.account1)
self.assertEqual(self.accm.n_accounts(), 1)
self.accm.add_account(self.account2)
self.assertEqual(self.accm.n_accounts(), 2)
def testReset(self):
self.testAddAccount()
self.accm.reset()
self.assertEqual(self.accm.n_accounts(), 0)
def testHasAccount(self):
self.assertEqual(self.accm.has_account(self.account1), False)
self.accm.add_account(self.account1)
self.assertEqual(self.accm.has_account(self.account1), True)
def testGetAccountFromHash(self):
account = Account('user', 'test')
thehash = account.__hash__()
self.accm.add_account(account)
self.assertEqual(self.accm.get_account_from_hash(thehash), account)
def testGetAccountFromName(self):
self.testAddAccount()
self.assertEqual(self.account2,
self.accm.get_account_from_name(self.user2))
    def testNAccounts(self):
        self.testAddAccount()
        self.assertEqual(self.accm.n_accounts(), 2)
def testAcquireAccount(self):
self.testAddAccount()
self.accm.acquire_account(self.account1)
self.account1.release()
self.accm.acquire_account(self.account1)
self.account1.release()
# Add three more accounts.
filename = os.path.join(os.path.dirname(__file__), 'account_pool.cfg')
self.accm.add_account(get_accounts_from_file(filename))
self.assertEqual(self.accm.n_accounts(), 5)
for i in range(0, 2000):
# Each time an account is acquired a different one should be
# returned.
acquired = {}
for n in range(0, 5):
account = self.accm.acquire_account()
self.assertTrue(account is not None)
self.assertNotIn(account.get_name(), acquired)
acquired[account.get_name()] = account
# Release one account.
acquired['abc'].release()
# Acquire one account.
account = self.accm.acquire_account()
self.assertEqual(account.get_name(), 'abc')
# Release all accounts.
for account in list(acquired.values()):
account.release()
def testReleaseAccounts(self):
account1 = Account('foo')
account2 = Account('bar')
pool = AccountPool()
pool.add_account(account1)
pool.add_account(account2)
pool.acquire_account(account1, 'one')
pool.acquire_account(account2, 'two')
self.assertNotIn(account1, pool.unlocked_accounts)
self.assertNotIn(account2, pool.unlocked_accounts)
pool.release_accounts('one')
self.assertIn(account1, pool.unlocked_accounts)
self.assertNotIn(account2, pool.unlocked_accounts)
pool.release_accounts('one')
self.assertIn(account1, pool.unlocked_accounts)
self.assertNotIn(account2, pool.unlocked_accounts)
pool.release_accounts('two')
self.assertIn(account1, pool.unlocked_accounts)
self.assertIn(account2, pool.unlocked_accounts)
def suite():
return unittest.TestLoader().loadTestsFromTestCase(AccountPoolTest)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
|
maximumG/exscript
|
tests/Exscript/AccountPoolTest.py
|
Python
|
mit
| 4,218
| 0.000948
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .linked_service import LinkedService
class SapHanaLinkedService(LinkedService):
"""SAP HANA Linked Service.
:param additional_properties: Unmatched properties from the message are
     deserialized to this collection
:type additional_properties: dict[str, object]
:param connect_via: The integration runtime reference.
:type connect_via:
~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:param description: Linked service description.
:type description: str
:param type: Constant filled by server.
:type type: str
:param server: Host name of the SAP HANA server. Type: string (or
Expression with resultType string).
:type server: object
:param authentication_type: The authentication type to be used to connect
to the SAP HANA server. Possible values include: 'Basic', 'Windows'
:type authentication_type: str or
~azure.mgmt.datafactory.models.SapHanaAuthenticationType
:param user_name: Username to access the SAP HANA server. Type: string (or
Expression with resultType string).
:type user_name: object
:param password: Password to access the SAP HANA server.
:type password: ~azure.mgmt.datafactory.models.SecureString
:param encrypted_credential: The encrypted credential used for
authentication. Credentials are encrypted using the integration runtime
credential manager. Type: string (or Expression with resultType string).
:type encrypted_credential: object
"""
_validation = {
'type': {'required': True},
'server': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'connect_via': {'key': 'connectVia', 'type': 'IntegrationRuntimeReference'},
'description': {'key': 'description', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'server': {'key': 'typeProperties.server', 'type': 'object'},
'authentication_type': {'key': 'typeProperties.authenticationType', 'type': 'str'},
'user_name': {'key': 'typeProperties.userName', 'type': 'object'},
'password': {'key': 'typeProperties.password', 'type': 'SecureString'},
'encrypted_credential': {'key': 'typeProperties.encryptedCredential', 'type': 'object'},
}
def __init__(self, server, additional_properties=None, connect_via=None, description=None, authentication_type=None, user_name=None, password=None, encrypted_credential=None):
super(SapHanaLinkedService, self).__init__(additional_properties=additional_properties, connect_via=connect_via, description=description)
self.server = server
self.authentication_type = authentication_type
self.user_name = user_name
self.password = password
self.encrypted_credential = encrypted_credential
self.type = 'SapHana'
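def _example_sap_hana_linked_service():
    # Hypothetical usage sketch (not part of the generated model file): the host
    # name and credentials are placeholders, and the relative import path for
    # SecureString is assumed from the package layout referenced in the class
    # docstring above.
    from .secure_string import SecureString
    return SapHanaLinkedService(
        server='myhanaserver.contoso.com:30015',
        authentication_type='Basic',
        user_name='hana_user',
        password=SecureString(value='example-password'),
    )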
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-datafactory/azure/mgmt/datafactory/models/sap_hana_linked_service.py
|
Python
|
mit
| 3,345
| 0.001495
|
# -*- coding: UTF-8 -*-
import sys
WorkList = None
def SH(i):
"""reformatting .SH"""
global WorkList
string = WorkList[i]
l = len(string) - 2
r = 0
while string[0] == '=' and string[l] == '=':
WorkList[i] = string[1:l]
string = WorkList[i]
l = len(string) - 1
r = r + 1
if r == 2:
WorkList[i] = '\n.SH "' + string + '"\n.PP\n'
else:
WorkList[i] = '\n.SS "' + string + '"\n.PP\n'
#---------------------------------------------------------------------------
def TP(i):
"""reformatting .TP"""
global WorkList
string = WorkList[i]
l=0
string1 = WorkList[i + l]
while string1 != '' and string1[0] == ';':
j=0
finish = 0
nexcl = 1
s = 0
while len(string) > j and finish == 0:
if string[j:j+8] == '<nowiki>':
nexcl = 0
j = j + 7
elif string[j:j+9] == '</nowiki>':
nexcl = 1
j = j + 8
elif string[j:j+4] == '<!--':
nexcl = 0
j = j + 3
elif string[j:j+3] == '-->':
nexcl = 1
j = j + 2
if string[j] == ':':
s = 1
finish = nexcl * s
s = 0
j = j + 1
if len(string) == j:
WorkList[i] = '.TP\n.B ' + string[1:]
elif string[j-1] == ':':
WorkList[i] = '.TP\n.B ' + string[1:j-1] + '\n' + string[j:]
l = l + 1
string1 = WorkList[i+l]
while string1 != '' and string1[0] == ':' and string1[1] <> ':' and string1[1] <> ';':
WorkList[i + l] = '.br\n' + string1[1:]
l = l + 1
string1 = WorkList[i + l]
#---------------------------------------------------------------------------
def wiki2man(content):
    """Convert a list of wiki-markup lines into a single roff (man page) string."""
    global WorkList
string = '\n'
string = unicode(string, 'utf-8')
WorkList = [string]
cpt = 0
while string != '' and cpt < len(content):
string = content[cpt]
cpt += 1
WorkList.append(string)
path = sys.argv[0]
n = len(path)
n = n - 11
path = path[:n]
########## Reformatting from wiki to roff ##########
# TH:
string = WorkList[1];
if len(string) > 2 and string[0] != '=' and string[:4] != '<!--' and string[:2] != '{{':
i = 0
while len(string) > i and string[i] != '(':
i = i + 1
WorkList.pop(1)
WorkList.pop(0)
i = 0
tabacc = -1
tab = 0
tab2 = 0
col = 0
nf = 0
nr = 0
excl = 0
nowiki = 0
RS=0
strng = unicode('{{MAN индекс}}', 'utf-8')
while len(WorkList) > i:
string = WorkList[i]
if len(string) > 1:
# reformatting "nowiki"
if string[:9] == '</nowiki>':
WorkList[i] = string[9:]
nowiki = 0
if nowiki == 0:
# reformatting "pre"
if string[:6] == '</pre>':
WorkList[i] = '\n.fi\n.RE\n' + string[6:]
nf = 0
# reformatting "tt"
elif string[:5] == '</tt>':
if string[6:7] == '. ':
WorkList[i] = '\n.fi\n.RE\n' + string[7:]
elif len(string) > 6 and string[6] == '.':
WorkList[i] = '\n.fi\n.RE\n' + string[6:]
else:
WorkList[i] = '\n.fi\n.RE\n' + string[5:]
nf = 0
# reformatting " "
if string[0] == ' ':
if nf == 0:
nf = 1
WorkList[i] = '\n.RS\n.nf\n' + string
elif nf == 1:
WorkList[i] = string
else:
if nf == 1:
nf = 0
WorkList[i] = '\n.fi\n.RE\n'
WorkList.insert(i+1, string)
string = WorkList[i]
if nf != 2 and nowiki == 0:
# reformatting excluded text <!-- * -->
if excl == 1:
WorkList[i] = '.\" ' + string[0:]
string = WorkList[i]
if nf == 0:
# format titles
if string[0] == '=' and string[len(string)-2] == '=':
SH(i)
# format ";"
elif string[0] == ';':
TP(i)
# format ":..."
elif string[0] == ':':
l = 1
s = ''
while string[l] == ':':
l = l + 1;
if RS == l:
s = '\n.br\n'
elif RS < l:
while RS < l:
s = s + '.RS\n'
RS = RS + 1
if string[RS] == ';':
WorkList[i] = s + '.TP\n.B ' + string[RS+1:]
else:
WorkList[i] = s + string[RS:]
string = WorkList[i]
stri = WorkList[i+1]
if RS > 0 and stri[0] <> ':':
while RS > 0:
WorkList[i] = string + '\n.RE\n'
RS = RS - 1
string = WorkList[i]
else:
while RS > 0 and len(stri) > RS-1 and stri[RS-1] <> ':':
RS = RS - 1
WorkList[i] = string + '\n.RE\n'
string = WorkList[i]
# format "*..."
elif string[0] == '*':
WorkList[i] = '.br\n * ' + string[1:]
# format tables 2
elif string[:2] == '{|':
if tab2 > 0:
WorkList[i] = '.RS\n'
tab2 = tab2 + 1
col = 0
else:
WorkList[i] = ''
tab2 = 1
elif string[:2] == '|-' and tab2 > 0:
WorkList[i] = ''
col = 0
elif string[:2] == '|}':
if tab2 == 1:
WorkList[i] = ''
col = 0
tab2 = 0
elif tab2 > 1:
WorkList[i] = '\n.RE\n'
col = 0
tab2 = tab2 - 1
elif string[:8] == '|valign=' and tab2 > 0:
j = 9
while len(string) > j and string[j]!='|':
j = j + 1
if string[j] == '|':
if col == 0:
WorkList[i] = '\n.TP\n' + string[j+1:]
col = 1
elif col > 0:
WorkList[i] = string[j+1:]
col = 2
elif col > 1:
WorkList[i] = '.PP\n' + string[j+1:]
col = col + 1
elif string[:1] == '|' and tab2 > 0:
if col == 0:
WorkList[i] = '\n.TP\n' + string[1:]
col = 1
elif col == 1:
WorkList[i] = string[1:]
col = col + 1
elif col > 1:
WorkList[i] = '\n' + string[1:]
col = col + 1
# delete wiki "Category:"
elif string[:11] == '[[Category:':
WorkList[i] = ''
# delete wiki {{MAN индекс}}
elif string[:14] == strng:
WorkList[i] = ''
# delete wiki [[en:Man ...]]
elif string[:9] == '[[en:Man ':
WorkList[i] = ''
string = WorkList[i]
j = 0
B = -1
I = -1
U = -1
K = -1
K1 = -1
while len(string) > j:
# reformatting excluded text <!-- * -->
if string[j:j+4] == '<!--':
string = string[:j] + '\n.\"' + string[j+4:]
excl = 1
j = j + 1
elif string[j:j+3] == '-->':
string = string[:j] + '\n' + string[j+3:]
excl = 0
j = j - 1
if excl == 0:
# Change some symbols: — « » — © " & < >
if string[j:j+8] == '―':
string = string[:j] + unicode('—', 'utf-8') + string[j+8:]
elif string[j:j+7] == '«':
string = string[:j] + unicode('«', 'utf-8') + string[j+7:]
elif string[j:j+7] == '»':
string = string[:j] + unicode('»', 'utf-8') + string[j+7:]
elif string[j:j+7] == '—':
string = string[:j] + unicode('—', 'utf-8') + string[j+7:]
elif string[j:j+6] == '©':
string = string[:j] + unicode('©', 'utf-8') + string[j+6:]
elif string[j:j+6] == '"':
string = string[:j] + unicode('"', 'utf-8') + string[j+6:]
elif string[j:j+6] == ' ':
string = string[:j] + unicode(' ', 'utf-8') + string[j+6:]
elif string[j:j+5] == '&':
string = string[:j] + unicode('&', 'utf-8') + string[j+5:]
elif string[j:j+4] == '<':
string = string[:j] + unicode('<', 'utf-8') + string[j+4:]
elif string[j:j+4] == '>':
string = string[:j] + unicode('>', 'utf-8') + string[j+4:]
# reformatting "-" or "\"
elif string[j:j+1] == '-':
string = string[0:j] + '\\' + string[j:]
j = j + 1
elif string[j:j+1] == '\\':
string = string[0:j] + '\e' + string[j+1:]
j = j + 1
# reformatting "nowiki"
elif string[j:j+8] == '<nowiki>':
nowiki = 1
if nf != 2:
string = string[:j] + string[j+8:]
j = j
elif string[j:j+9] == '</nowiki>':
nowiki = 0
if nf != 2:
string = string[:j] + string[j+9:]
j = j
if nowiki == 0:
if string[j:j+5] == "'''''":
if B != -1 and I == -1 :
if tabacc == 1:
string = string[:B] + '"' + string[B+3:j] + '"' + string[j+3:]
j = j - 4
B =- 1
else:
string = string[:B] + '\\fB' + string[B+3:j] + '\\fR' + string[j+3:]
j = j + 1
B =- 1
if I != -1 and B == -1:
string = string[:I] + '\\fI' + string[I+2:j] + '\\fR' + string[j+2:]
j = j + 2
I =- 1
                    # reformatting bold text 1
elif string[j:j+3] == "'''":
if B == -1:
B = j
else:
if tabacc == 1:
string = string[:B] + '"' + string[B+3:j] + '"' + string[j+3:]
j = j - 4
B =- 1
elif j+3-B > 5:
string = string[:B] + '\\fB' + string[B+3:j] + '\\fR' + string[j+3:]
j = j + 1
B =- 1
# reformatting italic text 1
elif string[j:j+2] == "''" and B == -1:
if I == -1:
I = j
else:
if j+3-I > 2:
string = string[:I] + '\\fI' + string[I+2:j] + '\\fR' + string[j+2:]
j = j + 2
I =- 1
# reformatting "pre"
elif string[j:j+5] == '<pre>':
string = string[:j] + '\n.RS\n.nf\n' + string[j+5:]
nf = 2
j = j + 3
elif string[j:j+6] == '</pre>':
string = string[:j] + '\n.fi\n.RE\n' + string[j+6:]
nf = 0
j = j + 3
# reformatting "code"
elif string[j:j+6] == '<code>':
string = string[:j] + '\n.nf\n' + string[j+6:]
nf = 2
j = j + 3
elif string[j:j+7] == '</code>':
string = string[:j] + '\n.fi\n' + string[j+7:]
nf = 0
j = j + 3
# reformatting "tt"
elif string[j:j+4] == '<tt>':
string = string[:j] + '\n.RS\n.nf\n' + string[j+4:]
nf = 2
j = j + 3
elif string[j:j+5] == '</tt>':
if string[j+5] == '.':
string = string[:j] + '\n.fi\n.RE\n' + string[j+6:]
else:
string = string[:j] + '\n.fi\n.RE\n' + string[j+5:]
nf = 0
j = j + 3
# reformatting "...}}"
elif string[j:j+2] == '}}':
if nr == 1:
string = string[:j] + '\\fR' + string[j+2:]
nr = 0
j = j + 2
elif nr == 2:
string = string[:j] + '\n.RE\n' + string[j+2:]
nr = 0
j = j + 3
# reformatting "{{Codeline|...}}"
elif string[j:j+11] == '{{Codeline|':
string = string[:j] + '\\fB' + string[j+11:]
nr = 1
j = j + 2
# reformatting "{{Warning|...}}"
elif string[j:j+10] == '{{Warning|':
string = string[:j] + '\\fB' + string[j+10:]
nr = 1
j = j + 2
# reformatting "{{Note|...}}"
elif string[j:j+7] == '{{Note|':
string = string[:j] + '\\fI' + string[j+7:]
nr = 1
j = j + 2
# reformatting "{{Discussion|...}}"
elif string[j:j+13] == '{{Discussion|':
string = string[:j] + '\\fI' + string[j+13:]
nr = 1
j = j + 2
# reformatting "{{Filename|...}}"
elif string[j:j+11] == '{{Filename|':
string = string[:j] + '\\fI' + string[j+11:]
nr = 1
j = j + 2
# reformatting "[mailto:...]"
elif string[j:j+8] == '[mailto:':
a = j + 8
while string[a] <> ' ':
a = a + 1
b = a + 1
while string[b] <> ']':
b = b + 1
string = string[:j] + string[a+1:b] + ' <' + string[j+8:a] + '>'
# reformatting "{{Box File|...|...}}"
elif string[j:j+11] == '{{Box File|':
a = j + 11
while string[a] <> '|':
a = a + 1
string = string[:j] + '\n.TP\n.B ' + string[j+11:a] + '\n.RS\n' + string[a+1:]
nr = 2
if nf == 0:
                        # reformatting bold text 2
if string[j:j+3] == '<b>':
string = string[:j] + '\\fB' + string[j+3:]
j = j + 2
elif string[j:j+4] == '</b>':
string = string[:j] + '\\fR' + string[j+4:]
j = j + 2
# reformatting italic text 2
elif string[j:j+3] == '<i>':
string = string[:j] + '\\fI' + string[j+3:]
j = j + 2
elif string[j:j+4] == '</i>':
string = string[:j] + '\\fR' + string[j+4:]
j = j + 2
# format underlined text
elif string[j:j+3] == '<u>':
U = j
elif string[j:j+4] == '</u>' and U != -1:
string = string[:U] + '\\fB\\fI' + string[U+3:j] + '\\fB\\fR' + string[j+4:]
j = j + 7
U =- 1
                        # break line 1
elif string[j:j+4] == '<br>':
string = string[0:j] + '\n.br\n' + string[j+4:]
j = j + 2
                        # break line 2
elif string[j:j+6] == '<br />':
string = string[0:j] + '\n.PP\n' + string[j+6:]
j = j + 2
# format tables 1
elif string[j:j+6] == '<table':
tab = j
while len(string) > j and string[j] != '>':
j = j + 1
if string[j] == '>':
string = string[:tab] + string[j+1:]
j = tab - 1
tab = 1
else:
j = tab
tab = 0
elif string[j:j+3] == '<tr':
Ktab = j
while len(string) > j and string[j] != '>':
j = j + 1
if string[j] == '>':
tabacc = 0
string = string[:Ktab] + '\n.SS ' + string[j+1:]
j = Ktab + 4
else:
j = Ktab
elif string[j:j+4] == '</tr':
Ktab = j
while len(string) > j and string[j] != '>':
j = j + 1
if string[j] == '>':
tabacc =- 1
string = string[:Ktab] + string[j+1:]
j = Ktab - 1
else:
j = Ktab
elif string[j:j+3] == '<td':
Ktab = j
while len(string) > j and string[j] != '>':
j = j + 1
if string[j] == '>':
tabacc = tabacc + 1
if tabacc == 1:
string = string[:Ktab] + string[j+1:]
j = Ktab - 1
else:
string = string[:Ktab] + '\n.PP\n' + string[j+1:]
j = Ktab + 3
else:
j = Ktab
elif string[j:j+4] == '</td':
Ktab = j
while len(string) > j and string[j] != '>':
j = j + 1
if string[j] == '>':
string = string[:Ktab] + string[j+1:]
j = Ktab - 1
else:
j = Ktab
elif string[j:j+7] == '</table':
tab = j
while len(string) > j and string[j] != '>':
j = j + 1
if string[j] == '>':
string = string[:tab] + string[j+1:]
j = tab - 1
tab = 0
else:
j = tab
tab = 1
# format table 2 {| |- | || |}
elif string[j:j+2] == '||' and tab2 > 0 and col > 0:
string = string[:j] + '\n' + string[j+2:]
col = col + 1
                        # format div tags (strip them)
elif string[j:j+4] == '<div':
div = j
while len(string) > j and string[j] != '>':
j = j + 1
if string[j] == '>':
string = string[:div] + string[j+1:]
j = div - 1
else:
j = div
elif string[j:j+5] == '</div':
div = j
while len(string) > j and string[j] != '>':
j = j + 1
if string[j] == '>':
string = string[:div] + string[j+1:]
j = div - 1
else:
j = div
# format internal links
elif string[j:j+2] == '[[':
K = j
elif string[j] == '|':
if K != -1:
K1 = j
elif string[j:j+2] == ']]':
if K != -1 and K1 != -1:
string = string[:K] + string[K1+1:j] + string[j+2:]
j = j - K1 + K - 2
K =- 1
K1 =- 1
elif K != -1 and K1 == -1:
string = string[:K] + string[K+2:j] + string[j+2:]
j = j - 4
K =- 1
j = j + 1
WorkList[i] = string
i = i + 1
# Make title .TH
string = '\n'
string = string.encode('utf-8')
string = unicode(string, 'utf-8')
WorkList.insert(0, string)
########## Output roff formatted file ##########
# Output encoded symbols:
string = ''
for i in range(len(WorkList)):
string = string + WorkList[i]
    # Delete empty lines and some other cruft, just to make the roff output cleaner:
i = 0
while len(string) > i:
if string[i:i+8] == '.RE\n\n.RS':
string = string[:i+3] + string[i+4:]
if string[i:i+8] == '.RE\n\n.br':
string = string[:i+3] + string[i+4:]
if string[i:i+6] == '\n.SS\n':
string = string[:i+5] + string[i+6:]
if string[i:i+5] == '\n\n.RE':
string = string[:i+1] + string[i+2:]
if string[i:i+5] == '\n\n\n\n\n':
string = string[:i] + string[i+3:]
if string[i:i+4] == '\n\n\n\n':
string = string[:i] + string[i+2:]
if string[i:i+3] == '\n\n\n':
string = string[:i] + string[i+1:]
i = i + 1
return string
#---------------------------------------------------------------------------
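def example_convert(path_in, path_out):
    """Hypothetical usage sketch (not part of the original script): it assumes a
    UTF-8 wiki source and that each element passed to wiki2man() is a unicode
    line that still carries its trailing newline, which is what the parser above
    indexes against."""
    import codecs
    fin = codecs.open(path_in, 'r', 'utf-8')
    lines = fin.readlines()
    fin.close()
    roff = wiki2man(lines)
    fout = codecs.open(path_out, 'w', 'utf-8')
    fout.write(roff)
    fout.close()
#---------------------------------------------------------------------------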
|
franck-talbart/codelet_tuning_infrastructure
|
ctr-common/plugins/4e7420cd-904e-4c2a-b08f-02c867ba4cd8/wiki2man.py
|
Python
|
gpl-3.0
| 27,388
| 0.005226
|
#!/usr/bin/env python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp
from kfp import dsl
def random_num_op(low, high):
"""Generate a random number between low and high."""
return dsl.ContainerOp(
name='Generate random number',
image='python:alpine3.6',
command=['sh', '-c'],
arguments=['python -c "import random; print(random.randint($0, $1))" | tee $2', str(low), str(high), '/tmp/output'],
file_outputs={'output': '/tmp/output'}
)
def flip_coin_op():
"""Flip a coin and output heads or tails randomly."""
return dsl.ContainerOp(
name='Flip coin',
image='python:alpine3.6',
command=['sh', '-c'],
arguments=['python -c "import random; result = \'heads\' if random.randint(0,1) == 0 '
'else \'tails\'; print(result)" | tee /tmp/output'],
file_outputs={'output': '/tmp/output'}
)
def print_op(msg):
"""Print a message."""
return dsl.ContainerOp(
name='Print',
image='alpine:3.6',
command=['echo', msg],
)
@dsl.pipeline(
name='Conditional execution pipeline',
description='Shows how to use dsl.Condition().'
)
def flipcoin_pipeline():
flip = flip_coin_op()
with dsl.Condition(flip.output == 'heads'):
random_num_head = random_num_op(0, 9)
with dsl.Condition(random_num_head.output > 5):
print_op('heads and %s > 5!' % random_num_head.output)
with dsl.Condition(random_num_head.output <= 5):
print_op('heads and %s <= 5!' % random_num_head.output)
with dsl.Condition(flip.output == 'tails'):
random_num_tail = random_num_op(10, 19)
with dsl.Condition(random_num_tail.output > 15):
print_op('tails and %s > 15!' % random_num_tail.output)
with dsl.Condition(random_num_tail.output <= 15):
print_op('tails and %s <= 15!' % random_num_tail.output)
if __name__ == '__main__':
kfp.compiler.Compiler().compile(flipcoin_pipeline, __file__ + '.yaml')
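def example_submit(host='http://localhost:8080'):
    """Hypothetical alternative to the compile step above (not part of the
    original sample): submit the pipeline directly to a running Kubeflow
    Pipelines endpoint; the host URL is a placeholder."""
    client = kfp.Client(host=host)
    return client.create_run_from_pipeline_func(flipcoin_pipeline, arguments={})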
|
kubeflow/kfp-tekton-backend
|
samples/core/condition/condition.py
|
Python
|
apache-2.0
| 2,572
| 0.001555
|
import pandas as pd
from sklearn import model_selection
from sklearn.ensemble import RandomForestClassifier
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data"
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
df = pd.read_csv(url, names=names)
array = df.values
X = array[:,0:8]
y = array[:,8]
seed = 21
num_trees = 100
max_features = 3
kfold = model_selection.KFold(n_splits=10, random_state=seed)
model = RandomForestClassifier(n_estimators=num_trees, max_features=max_features)
results = model_selection.cross_val_score(model, X, y, cv=kfold)
print('results: ')
print(results)
print()
print('mean: ' + str(results.mean()))
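# Hypothetical follow-up sketch (not part of the original script): refit the
# forest on the full data set and print per-feature importances; the feature
# labels reuse the first eight entries of the 'names' list defined above.
model.fit(X, y)
for feature_name, importance in zip(names[:8], model.feature_importances_):
    print(feature_name + ': ' + str(importance))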
|
sindresf/The-Playground
|
Python/Machine Learning/ScikitClassifiers/Classifiers/Random_Forrest_Classification.py
|
Python
|
mit
| 724
| 0.008287
|
from resource_media_types import *
from ClientFile import ClientFile
from ClientFolder import ClientFolder
from queryResource import ClientQuery
class ResourcesTypeResolverUtil(object):
classes = {}
#classes[ClientAdhocDataView.__name__] = ResourceMediaType.ADHOC_DATA_VIEW_MIME
#classes[ClientAwsDataSource.__name__] = ResourceMediaType.AWS_DATA_SOURCE_MIME
#classes[ClientBeanDataSource.__name__] = ResourceMediaType.BEAN_DATA_SOURCE_MIME
#classes[ClientCustomDataSource.__name__] = ResourceMediaType.CUSTOM_DATA_SOURCE_MIME
#classes[ClientDataType.__name__] = ResourceMediaType.DATA_TYPE_MIME
classes[ClientFile.__name__] = TYPE_FILE
classes[ClientFolder.__name__] = TYPE_FOLDER
#classes[ClientInputControl.__name__] = ResourceMediaType.INPUT_CONTROL_MIME
#classes[ClientJdbcDataSource.__name__] = ResourceMediaType.JDBC_DATA_SOURCE_MIME
#classes[ClientJndiJdbcDataSource.__name__] = ResourceMediaType.JNDI_JDBC_DATA_SOURCE_MIME
#classes[ClientListOfValues.__name__] = ResourceMediaType.LIST_OF_VALUES_MIME
#put(ClientMondrianConnection.class, ResourceMediaType.MONDRIAN_CONNECTION_MIME);
#put(ClientMondrianXmlaDefinition.class, ResourceMediaType.MONDRIAN_XMLA_DEFINITION_MIME);
#put(ClientOlapUnit.class, ResourceMediaType.OLAP_UNIT_MIME);
classes[ClientQuery.__name__] = TYPE_QUERY
#put(ClientReportUnit.class, ResourceMediaType.REPORT_UNIT_MIME);
#put(ClientSecureMondrianConnection.class, ResourceMediaType.SECURE_MONDRIAN_CONNECTION_MIME);
#put(ClientSemanticLayerDataSource.class, ResourceMediaType.SEMANTIC_LAYER_DATA_SOURCE_MIME);
#put(ClientVirtualDataSource.class, ResourceMediaType.VIRTUAL_DATA_SOURCE_MIME);
#put(ClientXmlaConnection.class, ResourceMediaType.XMLA_CONNECTION_MIME);
#put(ClientResourceLookup.class, ResourceMediaType.RESOURCE_LOOKUP_MIME);
#put(ClientDashboard.class, ResourceMediaType.DASHBOARD_MIME);
#put(ClientDomainTopic.class, ResourceMediaType.DOMAIN_TOPIC_MIME);
@staticmethod
def getMimeType(resource):
if isinstance(resource, basestring):
            # Return the resource itself by default when it is already a media-type string from resource_media_types.
ret = ResourcesTypeResolverUtil.classes.get(resource, resource)
else:
ret = ResourcesTypeResolverUtil.classes.get(resource.__class__.__name__, None)
return ret
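def _example_get_mime_type():
    # Hypothetical usage sketch (not part of the original module): getMimeType
    # resolves a registered resource class name to a media type, while a value
    # that is already a media-type string falls through unchanged.
    folder_mime = ResourcesTypeResolverUtil.getMimeType(ClientFolder.__name__)  # -> TYPE_FOLDER
    passthrough = ResourcesTypeResolverUtil.getMimeType(TYPE_FILE)              # -> TYPE_FILE
    return folder_mime, passthrough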
|
saguas/jasperserverlib
|
jasperserverlib/core/ResourcesTypeResolverUtil.py
|
Python
|
mit
| 2,430
| 0.017284
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
import unittest
class TestONNXWhile(serial.SerializedTestCase):
@serial.given(
condition=st.booleans(),
max_trip_count=st.integers(0, 100),
save_scopes=st.booleans(),
disable_scopes=st.booleans(),
seed=st.integers(0, 65535),
**hu.gcs_cpu_only)
def test_onnx_while_fibb(
self, condition, max_trip_count, save_scopes, disable_scopes, seed, gc, dc):
np.random.seed(seed)
if disable_scopes:
save_scopes = False
# Create body net
body_net = caffe2_pb2.NetDef()
# Two loop carried dependencies: first and second
body_net.external_input.extend(['i', 'cond', 'first', 'second'])
body_net.external_output.extend(['cond_new', 'second', 'third', 'third'])
add_op = core.CreateOperator(
'Add',
['first', 'second'],
['third'],
)
print3 = core.CreateOperator(
'Print',
['third'],
[],
)
limit_const = core.CreateOperator(
'ConstantFill',
[],
['limit_const'],
shape=[1],
dtype=caffe2_pb2.TensorProto.FLOAT,
value=100.0,
)
cond = core.CreateOperator(
'LT',
['third', 'limit_const'],
['cond_new'],
)
body_net.op.extend([add_op, print3, limit_const, cond])
while_op = core.CreateOperator(
'ONNXWhile',
['max_trip_count', 'condition', 'first_init', 'second_init'],
['first_a', 'second_a', 'third_a'],
body=body_net,
has_cond=True,
has_trip_count=True,
save_scopes=save_scopes,
disable_scopes=disable_scopes,
)
condition_arr = np.array(condition).astype(np.bool)
max_trip_count_arr = np.array(max_trip_count).astype(np.int64)
first_init = np.array([1]).astype(np.float32)
second_init = np.array([1]).astype(np.float32)
def ref(max_trip_count, condition, first_init, second_init):
first = 1
second = 1
results = []
if condition:
for _ in range(max_trip_count):
third = first + second
first = second
second = third
results.append(third)
if third > 100:
break
return (first, second, np.array(results).astype(np.float32))
self.assertReferenceChecks(
gc,
while_op,
[max_trip_count_arr, condition_arr, first_init, second_init],
ref,
)
if __name__ == "__main__":
unittest.main()
|
ryfeus/lambda-packs
|
pytorch/source/caffe2/python/operator_test/onnx_while_test.py
|
Python
|
mit
| 3,154
| 0.000951
|
# -*- coding: utf-8 -*-
import sys
def get_skeleton(N, strings):
    """Collapse runs of identical characters in each of the N strings and return
    the shared skeleton, or an empty list if the strings' skeletons differ."""
skeletons = []
for i in range(N):
skeleton = [strings[i][0]]
skeleton += [strings[i][j] for j in range(1, len(strings[i])) if strings[i][j] != strings[i][j-1]]
skeletons.append(skeleton)
for i in range(1, N):
if skeletons[i] != skeletons[i-1]:
skeletons[0] = []
break
return skeletons[0]
def solve():
    """Return the minimum number of single-character insertions/deletions needed
    to make all input strings equal, or 'Fegla Won' when impossible."""
N = int(input())
strings = [input() for _ in range(N)]
ans = 0
skeleton = get_skeleton(N, strings)
if len(skeleton) == 0:
return 'Fegla Won'
lengths = []
for c in skeleton:
length = dict()
for i in range(N):
for j in range(len(strings[i])):
if strings[i][j] != c:
break
length[j] = length.get(j, 0) + 1
strings[i] = strings[i][j:] if j < len(strings[i]) else ''
lengths.append(length)
for length in lengths:
ans += min(sum(abs(k - l) * length[l] for l in length) for k in length)
return ans
def main():
T = int(input())
for i in range(1, T + 1):
print('Case #{}: {}'.format(i, solve()))
if __name__ == '__main__':
sys.exit(main())
|
changyuheng/code-jam-solutions
|
2014/Round 1B/A.py
|
Python
|
mit
| 1,248
| 0.004006
|
import copy
import sys
from functools import update_wrapper
from future_builtins import zip
import django.db.models.manager # Imported to register signal handler.
from django.conf import settings
from django.core.exceptions import (ObjectDoesNotExist,
MultipleObjectsReturned, FieldError, ValidationError, NON_FIELD_ERRORS)
from django.core import validators
from django.db.models.fields import AutoField, FieldDoesNotExist
from django.db.models.fields.related import (ManyToOneRel,
OneToOneField, add_lazy_relation)
from django.db import (connections, router, transaction, DatabaseError,
DEFAULT_DB_ALIAS)
from django.db.models.query import Q
from django.db.models.query_utils import DeferredAttribute
from django.db.models.deletion import Collector
from django.db.models.options import Options
from django.db.models import signals
from django.db.models.loading import register_models, get_model
from django.utils.translation import ugettext_lazy as _
from django.utils.functional import curry
from django.utils.encoding import smart_str, force_unicode
from django.utils.text import get_text_list, capfirst
class ModelBase(type):
"""
Metaclass for all models.
"""
def __new__(cls, name, bases, attrs):
super_new = super(ModelBase, cls).__new__
parents = [b for b in bases if isinstance(b, ModelBase)]
if not parents:
# If this isn't a subclass of Model, don't do anything special.
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_class = super_new(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
if getattr(meta, 'app_label', None) is None:
# Figure out the app_label by looking one level up.
# For 'django.contrib.sites.models', this would be 'sites'.
model_module = sys.modules[new_class.__module__]
kwargs = {"app_label": model_module.__name__.split('.')[-2]}
else:
kwargs = {}
new_class.add_to_class('_meta', Options(meta, **kwargs))
if not abstract:
new_class.add_to_class('DoesNotExist', subclass_exception('DoesNotExist',
tuple(x.DoesNotExist
for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
or (ObjectDoesNotExist,), module))
new_class.add_to_class('MultipleObjectsReturned', subclass_exception('MultipleObjectsReturned',
tuple(x.MultipleObjectsReturned
for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
or (MultipleObjectsReturned,), module))
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, 'ordering'):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, 'get_latest_by'):
new_class._meta.get_latest_by = base_meta.get_latest_by
is_proxy = new_class._meta.proxy
if getattr(new_class, '_default_manager', None):
if not is_proxy:
# Multi-table inheritance doesn't inherit default manager from
# parents.
new_class._default_manager = None
new_class._base_manager = None
else:
# Proxy classes do inherit parent's default manager, if none is
# set explicitly.
new_class._default_manager = new_class._default_manager._copy_to_model(new_class)
new_class._base_manager = new_class._base_manager._copy_to_model(new_class)
# Bail out early if we have already created this class.
m = get_model(new_class._meta.app_label, name,
seed_cache=False, only_installed=False)
if m is not None:
return m
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# All the fields of any type declared on this model
new_fields = new_class._meta.local_fields + \
new_class._meta.local_many_to_many + \
new_class._meta.virtual_fields
field_names = set([f.name for f in new_fields])
# Basic setup for proxy models.
if is_proxy:
base = None
for parent in [cls for cls in parents if hasattr(cls, '_meta')]:
if parent._meta.abstract:
if parent._meta.fields:
raise TypeError("Abstract base class containing model fields not permitted for proxy model '%s'." % name)
else:
continue
if base is not None:
raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
else:
base = parent
if base is None:
raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
if (new_class._meta.local_fields or
new_class._meta.local_many_to_many):
raise FieldError("Proxy model '%s' contains model fields." % name)
new_class._meta.setup_proxy(base)
new_class._meta.concrete_model = base._meta.concrete_model
else:
new_class._meta.concrete_model = new_class
# Do the appropriate setup for any model parents.
o2o_map = dict([(f.rel.to, f) for f in new_class._meta.local_fields
if isinstance(f, OneToOneField)])
for base in parents:
original_base = base
if not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
continue
parent_fields = base._meta.local_fields + base._meta.local_many_to_many
# Check for clashes between locally declared fields and those
# on the base classes (we cannot handle shadowed fields at the
# moment).
for field in parent_fields:
if field.name in field_names:
raise FieldError('Local field %r in class %r clashes '
'with field of similar name from '
'base class %r' %
(field.name, name, base.__name__))
if not base._meta.abstract:
# Concrete classes...
base = base._meta.concrete_model
if base in o2o_map:
field = o2o_map[base]
elif not is_proxy:
attr_name = '%s_ptr' % base._meta.module_name
field = OneToOneField(base, name=attr_name,
auto_created=True, parent_link=True)
new_class.add_to_class(attr_name, field)
else:
field = None
new_class._meta.parents[base] = field
else:
# .. and abstract ones.
for field in parent_fields:
new_class.add_to_class(field.name, copy.deepcopy(field))
# Pass any non-abstract parent classes onto child.
new_class._meta.parents.update(base._meta.parents)
# Inherit managers from the abstract base classes.
new_class.copy_managers(base._meta.abstract_managers)
# Proxy models inherit the non-abstract managers from their base,
# unless they have redefined any of them.
if is_proxy:
new_class.copy_managers(original_base._meta.concrete_managers)
# Inherit virtual fields (like GenericForeignKey) from the parent
# class
for field in base._meta.virtual_fields:
if base._meta.abstract and field.name in field_names:
raise FieldError('Local field %r in class %r clashes '\
'with field of similar name from '\
'abstract base class %r' % \
(field.name, name, base.__name__))
new_class.add_to_class(field.name, copy.deepcopy(field))
if abstract:
# Abstract base models can't be instantiated and don't appear in
# the list of models for an app. We do the final setup for them a
# little differently from normal models.
attr_meta.abstract = False
new_class.Meta = attr_meta
return new_class
new_class._prepare()
register_models(new_class._meta.app_label, new_class)
# Because of the way imports happen (recursively), we may or may not be
# the first time this model tries to register with the framework. There
# should only be one class for each model, so we always return the
# registered version.
return get_model(new_class._meta.app_label, name,
seed_cache=False, only_installed=False)
def copy_managers(cls, base_managers):
# This is in-place sorting of an Options attribute, but that's fine.
base_managers.sort()
for _, mgr_name, manager in base_managers:
val = getattr(cls, mgr_name, None)
if not val or val is manager:
new_manager = manager._copy_to_model(cls)
cls.add_to_class(mgr_name, new_manager)
def add_to_class(cls, name, value):
if hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
def _prepare(cls):
"""
Creates some methods once self._meta has been populated.
"""
opts = cls._meta
opts._prepare(cls)
if opts.order_with_respect_to:
cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
# defer creating accessors on the foreign class until we are
# certain it has been created
def make_foreign_order_accessors(field, model, cls):
setattr(
field.rel.to,
'get_%s_order' % cls.__name__.lower(),
curry(method_get_order, cls)
)
setattr(
field.rel.to,
'set_%s_order' % cls.__name__.lower(),
curry(method_set_order, cls)
)
add_lazy_relation(
cls,
opts.order_with_respect_to,
opts.order_with_respect_to.rel.to,
make_foreign_order_accessors
)
# Give the class a docstring -- its definition.
if cls.__doc__ is None:
cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join([f.attname for f in opts.fields]))
if hasattr(cls, 'get_absolute_url'):
cls.get_absolute_url = update_wrapper(curry(get_absolute_url, opts, cls.get_absolute_url),
cls.get_absolute_url)
signals.class_prepared.send(sender=cls)
class ModelState(object):
"""
A class for storing instance state
"""
def __init__(self, db=None):
self.db = db
# If true, uniqueness validation checks will consider this a new, as-yet-unsaved object.
# Necessary for correct validation of new instances of objects with explicit (non-auto) PKs.
# This impacts validation only; it has no effect on the actual save.
self.adding = True
class Model(object):
__metaclass__ = ModelBase
_deferred = False
def __init__(self, *args, **kwargs):
signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)
# Set up the storage for instance state
self._state = ModelState()
# There is a rather weird disparity here; if kwargs, it's set, then args
# overrides it. It should be one or the other; don't duplicate the work
# The reason for the kwargs check is that standard iterator passes in by
# args, and instantiation for iteration is 33% faster.
args_len = len(args)
if args_len > len(self._meta.fields):
# Daft, but matches old exception sans the err msg.
raise IndexError("Number of args exceeds number of fields")
fields_iter = iter(self._meta.fields)
if not kwargs:
# The ordering of the zip calls matter - zip throws StopIteration
# when an iter throws it. So if the first iter throws it, the second
# is *not* consumed. We rely on this, so don't change the order
# without changing the logic.
for val, field in zip(args, fields_iter):
setattr(self, field.attname, val)
else:
# Slower, kwargs-ready version.
for val, field in zip(args, fields_iter):
setattr(self, field.attname, val)
kwargs.pop(field.name, None)
# Maintain compatibility with existing calls.
if isinstance(field.rel, ManyToOneRel):
kwargs.pop(field.attname, None)
# Now we're left with the unprocessed fields that *must* come from
# keywords, or default.
for field in fields_iter:
is_related_object = False
# This slightly odd construct is so that we can access any
# data-descriptor object (DeferredAttribute) without triggering its
# __get__ method.
if (field.attname not in kwargs and
isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute)):
# This field will be populated on request.
continue
if kwargs:
if isinstance(field.rel, ManyToOneRel):
try:
# Assume object instance was passed in.
rel_obj = kwargs.pop(field.name)
is_related_object = True
except KeyError:
try:
# Object instance wasn't passed in -- must be an ID.
val = kwargs.pop(field.attname)
except KeyError:
val = field.get_default()
else:
# Object instance was passed in. Special case: You can
# pass in "None" for related objects if it's allowed.
if rel_obj is None and field.null:
val = None
else:
try:
val = kwargs.pop(field.attname)
except KeyError:
# This is done with an exception rather than the
# default argument on pop because we don't want
# get_default() to be evaluated, and then not used.
# Refs #12057.
val = field.get_default()
else:
val = field.get_default()
if is_related_object:
# If we are passed a related instance, set it using the
# field.name instead of field.attname (e.g. "user" instead of
# "user_id") so that the object gets properly cached (and type
# checked) by the RelatedObjectDescriptor.
setattr(self, field.name, rel_obj)
else:
setattr(self, field.attname, val)
if kwargs:
for prop in kwargs.keys():
try:
if isinstance(getattr(self.__class__, prop), property):
setattr(self, prop, kwargs.pop(prop))
except AttributeError:
pass
if kwargs:
raise TypeError("'%s' is an invalid keyword argument for this function" % kwargs.keys()[0])
super(Model, self).__init__()
signals.post_init.send(sender=self.__class__, instance=self)
def __repr__(self):
try:
u = unicode(self)
except (UnicodeEncodeError, UnicodeDecodeError):
u = '[Bad Unicode data]'
return smart_str(u'<%s: %s>' % (self.__class__.__name__, u))
def __str__(self):
if hasattr(self, '__unicode__'):
return force_unicode(self).encode('utf-8')
return '%s object' % self.__class__.__name__
def __eq__(self, other):
return isinstance(other, self.__class__) and self._get_pk_val() == other._get_pk_val()
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self._get_pk_val())
def __reduce__(self):
"""
Provides pickling support. Normally, this just dispatches to Python's
standard handling. However, for models with deferred field loading, we
need to do things manually, as they're dynamically created classes and
only module-level classes can be pickled by the default path.
"""
data = self.__dict__
model = self.__class__
# The obvious thing to do here is to invoke super().__reduce__()
# for the non-deferred case. Don't do that.
# On Python 2.4, there is something weird with __reduce__,
# and as a result, the super call will cause an infinite recursion.
# See #10547 and #12121.
defers = []
pk_val = None
if self._deferred:
from django.db.models.query_utils import deferred_class_factory
factory = deferred_class_factory
for field in self._meta.fields:
if isinstance(self.__class__.__dict__.get(field.attname),
DeferredAttribute):
defers.append(field.attname)
if pk_val is None:
# The pk_val and model values are the same for all
# DeferredAttribute classes, so we only need to do this
# once.
obj = self.__class__.__dict__[field.attname]
model = obj.model_ref()
else:
factory = simple_class_factory
return (model_unpickle, (model, defers, factory), data)
def _get_pk_val(self, meta=None):
if not meta:
meta = self._meta
return getattr(self, meta.pk.attname)
def _set_pk_val(self, value):
return setattr(self, self._meta.pk.attname, value)
pk = property(_get_pk_val, _set_pk_val)
def serializable_value(self, field_name):
"""
Returns the value of the field name for this instance. If the field is
a foreign key, returns the id value, instead of the object. If there's
no Field object with this name on the model, the model attribute's
value is returned directly.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
and not use this method.
"""
try:
field = self._meta.get_field_by_name(field_name)[0]
except FieldDoesNotExist:
return getattr(self, field_name)
return getattr(self, field.attname)
def save(self, force_insert=False, force_update=False, using=None):
"""
Saves the current instance. Override this in a subclass if you want to
control the saving process.
The 'force_insert' and 'force_update' parameters can be used to insist
that the "save" must be an SQL insert or update (or equivalent for
non-SQL backends), respectively. Normally, they should not be set.
"""
if force_insert and force_update:
raise ValueError("Cannot force both insert and updating in model saving.")
self.save_base(using=using, force_insert=force_insert, force_update=force_update)
save.alters_data = True
def save_base(self, raw=False, cls=None, origin=None, force_insert=False,
force_update=False, using=None):
"""
Does the heavy-lifting involved in saving. Subclasses shouldn't need to
override this method. It's separate from save() in order to hide the
need for overrides of save() to pass around internal-only parameters
('raw', 'cls', and 'origin').
"""
using = using or router.db_for_write(self.__class__, instance=self)
assert not (force_insert and force_update)
if cls is None:
cls = self.__class__
meta = cls._meta
if not meta.proxy:
origin = cls
else:
meta = cls._meta
if origin and not meta.auto_created:
signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using)
# If we are in a raw save, save the object exactly as presented.
# That means that we don't try to be smart about saving attributes
# that might have come from the parent class - we just save the
# attributes we have been given to the class we have been given.
# We also go through this process to defer the save of proxy objects
# to their actual underlying model.
if not raw or meta.proxy:
if meta.proxy:
org = cls
else:
org = None
for parent, field in meta.parents.items():
# At this point, parent's primary key field may be unknown
# (for example, from administration form which doesn't fill
# this field). If so, fill it.
if field and getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None:
setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
self.save_base(cls=parent, origin=org, using=using)
if field:
setattr(self, field.attname, self._get_pk_val(parent._meta))
if meta.proxy:
return
if not meta.proxy:
non_pks = [f for f in meta.local_fields if not f.primary_key]
# First, try an UPDATE. If that doesn't update anything, do an INSERT.
pk_val = self._get_pk_val(meta)
pk_set = pk_val is not None
record_exists = True
manager = cls._base_manager
if pk_set:
# Determine whether a record with the primary key already exists.
if (force_update or (not force_insert and
manager.using(using).filter(pk=pk_val).exists())):
# It does already exist, so do an UPDATE.
if force_update or non_pks:
values = [(f, None, (raw and getattr(self, f.attname) or f.pre_save(self, False))) for f in non_pks]
if values:
rows = manager.using(using).filter(pk=pk_val)._update(values)
if force_update and not rows:
raise DatabaseError("Forced update did not affect any rows.")
else:
record_exists = False
if not pk_set or not record_exists:
if meta.order_with_respect_to:
# If this is a model with an order_with_respect_to
# autopopulate the _order field
field = meta.order_with_respect_to
order_value = manager.using(using).filter(**{field.name: getattr(self, field.attname)}).count()
self._order = order_value
fields = meta.local_fields
if not pk_set:
if force_update:
raise ValueError("Cannot force an update in save() with no primary key.")
fields = [f for f in fields if not isinstance(f, AutoField)]
record_exists = False
update_pk = bool(meta.has_auto_field and not pk_set)
result = manager._insert([self], fields=fields, return_id=update_pk, using=using, raw=raw)
if update_pk:
setattr(self, meta.pk.attname, result)
transaction.commit_unless_managed(using=using)
# Store the database on which the object was saved
self._state.db = using
# Once saved, this is no longer a to-be-added instance.
self._state.adding = False
# Signal that the save is complete
if origin and not meta.auto_created:
signals.post_save.send(sender=origin, instance=self,
created=(not record_exists), raw=raw, using=using)
save_base.alters_data = True
def delete(self, using=None):
using = using or router.db_for_write(self.__class__, instance=self)
assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." % (self._meta.object_name, self._meta.pk.attname)
collector = Collector(using=using)
collector.collect([self])
collector.delete()
delete.alters_data = True
def _get_FIELD_display(self, field):
value = getattr(self, field.attname)
return force_unicode(dict(field.flatchoices).get(value, value), strings_only=True)
def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
if not self.pk:
raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
op = is_next and 'gt' or 'lt'
order = not is_next and '-' or ''
param = smart_str(getattr(self, field.attname))
q = Q(**{'%s__%s' % (field.name, op): param})
q = q|Q(**{field.name: param, 'pk__%s' % op: self.pk})
qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by('%s%s' % (order, field.name), '%spk' % order)
try:
return qs[0]
except IndexError:
raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
def _get_next_or_previous_in_order(self, is_next):
cachename = "__%s_order_cache" % is_next
if not hasattr(self, cachename):
op = is_next and 'gt' or 'lt'
order = not is_next and '-_order' or '_order'
order_field = self._meta.order_with_respect_to
obj = self._default_manager.filter(**{
order_field.name: getattr(self, order_field.attname)
}).filter(**{
'_order__%s' % op: self._default_manager.values('_order').filter(**{
self._meta.pk.name: self.pk
})
}).order_by(order)[:1].get()
setattr(self, cachename, obj)
return getattr(self, cachename)
def prepare_database_save(self, unused):
return self.pk
def clean(self):
"""
Hook for doing any extra model-wide validation after clean() has been
called on every field by self.clean_fields. Any ValidationError raised
by this method will not be associated with a particular field; it will
have a special-case association with the field defined by NON_FIELD_ERRORS.
"""
pass
def validate_unique(self, exclude=None):
"""
Checks unique constraints on the model and raises ``ValidationError``
if any failed.
"""
unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
errors = self._perform_unique_checks(unique_checks)
date_errors = self._perform_date_checks(date_checks)
for k, v in date_errors.items():
errors.setdefault(k, []).extend(v)
if errors:
raise ValidationError(errors)
def _get_unique_checks(self, exclude=None):
"""
Gather a list of checks to perform. Since validate_unique could be
called from a ModelForm, some fields may have been excluded; we can't
perform a unique check on a model that is missing fields involved
in that check.
Fields that did not validate should also be excluded, but they need
to be passed in via the exclude argument.
"""
if exclude is None:
exclude = []
unique_checks = []
unique_togethers = [(self.__class__, self._meta.unique_together)]
for parent_class in self._meta.parents.keys():
if parent_class._meta.unique_together:
unique_togethers.append((parent_class, parent_class._meta.unique_together))
for model_class, unique_together in unique_togethers:
for check in unique_together:
for name in check:
# If this is an excluded field, don't add this check.
if name in exclude:
break
else:
unique_checks.append((model_class, tuple(check)))
# These are checks for the unique_for_<date/year/month>.
date_checks = []
# Gather a list of checks for fields declared as unique and add them to
# the list of checks.
fields_with_class = [(self.__class__, self._meta.local_fields)]
for parent_class in self._meta.parents.keys():
fields_with_class.append((parent_class, parent_class._meta.local_fields))
for model_class, fields in fields_with_class:
for f in fields:
name = f.name
if name in exclude:
continue
if f.unique:
unique_checks.append((model_class, (name,)))
if f.unique_for_date and f.unique_for_date not in exclude:
date_checks.append((model_class, 'date', name, f.unique_for_date))
if f.unique_for_year and f.unique_for_year not in exclude:
date_checks.append((model_class, 'year', name, f.unique_for_year))
if f.unique_for_month and f.unique_for_month not in exclude:
date_checks.append((model_class, 'month', name, f.unique_for_month))
return unique_checks, date_checks
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
# Try to look up an existing object with the same values as this
# object's values for all the unique field.
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
if lookup_value is None:
# no value, skip the lookup
continue
if f.primary_key and not self._state.adding:
# no need to check for unique primary key when editing
continue
lookup_kwargs[str(field_name)] = lookup_value
# some fields were skipped, no reason to do the check
if len(unique_check) != len(lookup_kwargs.keys()):
continue
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
# Note that we need to use the pk as defined by model_class, not
# self.pk. These can be different fields because model inheritance
# allows single model to have effectively multiple primary keys.
# Refs #17615.
model_class_pk = self._get_pk_val(model_class._meta)
if not self._state.adding and model_class_pk is not None:
qs = qs.exclude(pk=model_class_pk)
if qs.exists():
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
return errors
def _perform_date_checks(self, date_checks):
errors = {}
for model_class, lookup_type, field, unique_for in date_checks:
lookup_kwargs = {}
# there's a ticket to add a date lookup, we can remove this special
            # case if that makes its way in
date = getattr(self, unique_for)
if date is None:
continue
if lookup_type == 'date':
lookup_kwargs['%s__day' % unique_for] = date.day
lookup_kwargs['%s__month' % unique_for] = date.month
lookup_kwargs['%s__year' % unique_for] = date.year
else:
lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
lookup_kwargs[field] = getattr(self, field)
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
if not self._state.adding and self.pk is not None:
qs = qs.exclude(pk=self.pk)
if qs.exists():
errors.setdefault(field, []).append(
self.date_error_message(lookup_type, field, unique_for)
)
return errors
def date_error_message(self, lookup_type, field, unique_for):
opts = self._meta
return _(u"%(field_name)s must be unique for %(date_field)s %(lookup)s.") % {
'field_name': unicode(capfirst(opts.get_field(field).verbose_name)),
'date_field': unicode(capfirst(opts.get_field(unique_for).verbose_name)),
'lookup': lookup_type,
}
def unique_error_message(self, model_class, unique_check):
opts = model_class._meta
model_name = capfirst(opts.verbose_name)
# A unique field
if len(unique_check) == 1:
field_name = unique_check[0]
field = opts.get_field(field_name)
field_label = capfirst(field.verbose_name)
            # Return the field-specific "unique" error message.
return field.error_messages['unique'] % {
'model_name': unicode(model_name),
'field_label': unicode(field_label)
}
# unique_together
else:
field_labels = map(lambda f: capfirst(opts.get_field(f).verbose_name), unique_check)
field_labels = get_text_list(field_labels, _('and'))
return _(u"%(model_name)s with this %(field_label)s already exists.") % {
'model_name': unicode(model_name),
'field_label': unicode(field_labels)
}
def full_clean(self, exclude=None):
"""
        Calls clean_fields, clean, and validate_unique on the model,
        and raises a ``ValidationError`` for any errors that occurred.
"""
errors = {}
if exclude is None:
exclude = []
try:
self.clean_fields(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
# Form.clean() is run even if other validation fails, so do the
# same with Model.clean() for consistency.
try:
self.clean()
except ValidationError as e:
errors = e.update_error_dict(errors)
# Run unique checks, but only for fields that passed validation.
for name in errors.keys():
if name != NON_FIELD_ERRORS and name not in exclude:
exclude.append(name)
try:
self.validate_unique(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
if errors:
raise ValidationError(errors)
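    # A minimal usage sketch of the flow described above; Article and its
    # fields are hypothetical:
    #     article = Article(title="", slug="hello")
    #     try:
    #         article.full_clean(exclude=["author"])
    #     except ValidationError as e:
    #         errors = e.message_dict  # maps field name -> list of messages
    #     else:
    #         article.save()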
def clean_fields(self, exclude=None):
"""
Cleans all fields and raises a ValidationError containing message_dict
of all validation errors if any occur.
"""
if exclude is None:
exclude = []
errors = {}
for f in self._meta.fields:
if f.name in exclude:
continue
# Skip validation for empty fields with blank=True. The developer
# is responsible for making sure they have a valid value.
raw_value = getattr(self, f.attname)
if f.blank and raw_value in validators.EMPTY_VALUES:
continue
try:
setattr(self, f.attname, f.clean(raw_value, self))
except ValidationError as e:
errors[f.name] = e.messages
if errors:
raise ValidationError(errors)
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(ordered_obj, self, id_list, using=None):
if using is None:
using = DEFAULT_DB_ALIAS
rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
order_name = ordered_obj._meta.order_with_respect_to.name
# FIXME: It would be nice if there was an "update many" version of update
# for situations like this.
for i, j in enumerate(id_list):
ordered_obj.objects.filter(**{'pk': j, order_name: rel_val}).update(_order=i)
transaction.commit_unless_managed(using=using)
def method_get_order(ordered_obj, self):
rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
order_name = ordered_obj._meta.order_with_respect_to.name
pk_name = ordered_obj._meta.pk.name
return [r[pk_name] for r in
ordered_obj.objects.filter(**{order_name: rel_val}).values(pk_name)]
##############################################
# HELPER FUNCTIONS (CURRIED MODEL FUNCTIONS) #
##############################################
def get_absolute_url(opts, func, self, *args, **kwargs):
return settings.ABSOLUTE_URL_OVERRIDES.get('%s.%s' % (opts.app_label, opts.module_name), func)(self, *args, **kwargs)
########
# MISC #
########
class Empty(object):
pass
def simple_class_factory(model, attrs):
"""Used to unpickle Models without deferred fields.
We need to do this the hard way, rather than just using
the default __reduce__ implementation, because of a
__deepcopy__ problem in Python 2.4
"""
return model
def model_unpickle(model, attrs, factory):
"""
Used to unpickle Model subclasses with deferred fields.
"""
cls = factory(model, attrs)
return cls.__new__(cls)
model_unpickle.__safe_for_unpickle__ = True
def subclass_exception(name, parents, module):
return type(name, parents, {'__module__': module})
|
lzw120/django
|
django/db/models/base.py
|
Python
|
bsd-3-clause
| 39,705
| 0.002393
|
import urllib
import urllib2
import threading
BaseProp = 361995
EndProp = 362044
proxies = {'http': 'http://localhost:8080'}
# handles getting the owner pdf listing of the home
class DoNothingRedirectHandler(urllib2.HTTPRedirectHandler):
def http_error_302(self, req, fp, code, msg, headers):
return headers
opener = urllib2.build_opener(DoNothingRedirectHandler())
data_sheet_url = 'http://.org/appraisal/publicaccess/PropertyDataSheet.aspx?PropertyID=%s&PropertyOwnerID=%s&NodeID=11'
get_data_location = lambda propId,ownerId: opener.open(urllib2.Request(data_sheet_url%(propId, ownerId)))
get_data_pdf = lambda url: urllib.urlopen(url).read()
def get_pdf_listing(propId, ownerId, owner_name):
fname = owner_name.replace(" ","_").replace(",","").replace("&","and")+".pdf"
x = get_data_location(propId, ownerId)
if x.headers:
url = x.getheader('Location')
if url == "":
return
url = "http://.org/"+url.split("#")[0]
pdf = get_data_pdf(url)
f = open(fname,'wb')
f.write(pdf)
print "wrote pdf", fname
MAIN_HISTORY = {}
PropIds = []
for i in xrange(BaseProp, EndProp):
PropIds.append('R%d'%i)
CurrentYear = 2010
EndYear = 2006
history_url = "http://.org/appraisal/publicaccess/PropertyHistory.aspx?PropertyID=%s&PropertyOwnerID=%s&NodeID=11&FirstTaxYear=%d&LastTaxYear=%d"
get_history_page = lambda propId,ownerId,eyear,syear:urllib.urlopen(history_url%(propId,ownerId,syear,eyear)).read()
HistorySplit = lambda data: "".join(data.split('<HistoryResults>')[1]).split('</HistoryResults>')[0].strip()
HistoryTaxYearSplit = lambda data: data.split('History TaxYear="')[1].split('" ')[0].strip()
HistroyNameSplit = lambda data: data.split('Name="')[1].split('" ')[0].strip()
HistroyValueSplit = lambda data: data.split('Value="')[1].split('" ')[0].strip()
def process_history(propId,ownerId, cyear, eyear):
history_data = {}
page_data = get_history_page(propId,ownerId, cyear-1, eyear)
if page_data.find("<HistoryResults>") == -1:
print "page has no relevant history"
return {}
for line in HistorySplit(page_data).splitlines():
if line.find("History TaxYear=") == -1:
continue
line = line.strip()
if line == "":
continue
year = HistoryTaxYearSplit(line)
name = HistroyNameSplit(line)
value = HistroyValueSplit(line)
if not year in history_data:
history_data[year] = {}
history_data[year][name] = value
return history_data
# get the required segments for the square footages and amendments
init_imp_page = 'http://.org/appraisal/publicaccess/PropertyImpDetail.aspx?CurrPosition=1&LastPosition=1&PropertyID=%s&PropertyOwnerID=0&NodeID=11'
extract_segments = lambda data: int(data.split('<td class="ssDetailLabel" nowrap="true">Segments</td><td class="ssDetailData" nowrap="true">')[1].split("</td>")[0])
get_segments = lambda propId: extract_segments(urllib.urlopen(init_imp_page%propId).read())
segments_url = "http://.org/appraisal/publicaccess/PropertyImpSegDetail.aspx"
segments_data = lambda current,last,propID: "CurrSegPosition=%d&LastSegPosition=%d&CurrPosition=1&LastPosition=1&TaxYear=2008&PropertyID=%s&PropertyOwnerID=0&NodeID=11&dbKeyAuth=Appraisal"%(current,last,propID)
segments_post = lambda data:urllib.urlopen(segments_url,data=data).read()
def traverse_segments(start, end, propId):
imps = {"MA (Main Area)":"Main Floor",
"MA2.0 (Main Area 2nd Flr)":"Second Floor",
"Garage":"Garage",
"Porch":"Porch"}
functions = {"Second Floor":[SecondFlrSqSplit, SecondFlrVaSplit],
"Main Floor":[MainFlrSqSplit, MainFlrVaSplit],
"Porch":[PorchFlrSqSplit, PorchFlrVaSplit],
"Garage":[GarageFlrSqSplit, GarageFlrVaSplit]}
results = {}
for i in xrange(start, end+1):
data = segments_data(i,end,propId)
page_data = segments_post(data)
for i in imps.keys():
if page_data.find(i) > -1:
name = imps[i]
results[name+" SQ Footage"] = functions[name][0](page_data).strip()
results[name+" Value"] = "$"+functions[name][1](page_data).split('$')[1].strip()
break
return results
header_string = "Address,Name,ID,Legal Description,Year,% inc,Value,Land ($),sq ft,$/sq ft,House ($),Sq ft,$/sq ft,1st floor ($),Sq ft,$/sq ft,2nd floor ($),Sq ft,$/sq ft,Garage ($),Sq ft,$/sq ft,Porch ($),Sq ft,$/sq ft"
split_str2 = '%s:</td><td class="ssDetailData" valign="top">'
split_field2 = lambda sstr, data: data.split(split_str2%sstr)[1].split("</td>")[0]
split_str3 = '<td class="ssDetailLabel">%s:</td><td class="ssDetailData" width="125px" align="right">'
def split_field3(sstr, data):
    # interpolate the field name into the template before splitting
    return data.split(split_str3 % sstr)[1].split(' ')[0]
ExemptionSplit = lambda data: data.split('Exemption Codes:</label></td><td><table cellpadding="0" cellspacing="0"><tr><td class="ssDetailData">')[1].split('</td></tr></table></td></tr><tr xmlns:msxsl="urn:schemas-microsoft-com:xslt" xmlns:tyl="http://www.tsgweb.com"><td id="tdEntity" class="ssDetailLabel" valign="top">')[0]
Exemptions = lambda data: ",".join([i.split('(')[0].strip() for i in ExemptionSplit(data).split(" <br />")])
CleanupAddr = lambda addr: addr.replace("<br />","")
# pull out address name, etc on first page
AddrSplit = lambda data: CleanupAddr(data.split('Property Address:</td><td class="ssDetailData" valign="top">')[1].split('</td>')[0])
NameSplit = lambda data: data.split('Owner Name:</td><td class="ssDetailData">')[1].split('</td>')[0].replace("&","&")
YearSplit = lambda data:"2010"
PropID = lambda data: data.split('Property Detail Sheet (')[1].split(')')[0]
LDescSplit = lambda data: data.split('Legal Description:</td><td class="ssDetailData">')[1].split('</td>')[0]
# Pull out values from summary stuff
AppraisedSplit = lambda data: data.split('Appraised:</td><td class="ssDetailData" width="125px" align="right">')[1].split(' ')[0]
LandHSSplit = lambda data: data.split('Land HS:</td><td class="ssDetailData" width="125px" align="right">')[1].split(' ')[0]
ImprovementHSSplit = lambda data: data.split('Improvement HS:</td><td class="ssDetailData" width="125px" align="right">')[1].split(' ')[0]
HomeSteadCapSplit = lambda data: data.split('Homestead Cap:</td><td class="ssDetailData" width="125px" align="right">')[1].split(' ')[0]
AssessedSplit = lambda data: data.split('Assessed:</td><td class="ssDetailData" width="125px" align="right">')[1].split(' ')[0]
OwnerIdSplit = lambda data: data.split('Owner ID:</td><td class="ssDetailData">')[1].split('</td>')[0]
# Pull out values and areas of floors
split_str = '<td class="ssDetailPageLabel" nowrap="1">%s</td><td class="ssDetailPageData" nowrap="1">'
split_field = lambda sstr, data: data.split(split_str %sstr)[1].split('</td>')[0]
MainFlrSqSplit = lambda data: split_field("Area", data)
MainFlrVaSplit = lambda data: split_field("Value", data)
SecondFlrSqSplit = lambda data: split_field("Area", data)
SecondFlrVaSplit = lambda data: split_field("Value", data)
PorchFlrSqSplit = lambda data: split_field("Area", data)
PorchFlrVaSplit = lambda data: split_field("Value", data)
GarageFlrSqSplit = lambda data: split_field("Area", data)
GarageFlrVaSplit = lambda data: split_field("Value", data)
main_url = 'http://.org/appraisal/publicaccess/PropertyDetail.aspx?PropertyID=%s&dbKeyAuth=Appraisal&TaxYear=%s&NodeID=11&PropertyOwnerID=%s'
get_main_page = lambda propId,cyear,ownerId: urllib.urlopen(main_url%(propId, cyear,ownerId), proxies=proxies).read()
THREADS = []
main_search = "http://.org/appraisal/publicaccess/PropertySearch.aspx?PropertySearchType=1&SelectedItem=10&PropertyID=&PropertyOwnerID=&NodeID=11"
search_data = lambda prop_value,cyear: "PropertyID=%s&PropertySearchType=1&NodeID=11&dbKeyAuth=Appraisal&TaxYear=%s&SearchSubmit=Search"%(prop_value, cyear)
get_property = lambda data: urllib.urlopen("http://.org/appraisal/publicaccess/PropertySearchResults.aspx",data=data).read()
def get_propId(property,cyear):
d = search_data(property,cyear)
#print d
page = get_property(d)
#print page
print page.split("ViewPropertyOrOwners(")[1].split(")")[0].replace(",","").split()
propId = page.split("ViewPropertyOrOwners(")[1].split(")")[0].replace(",","").split()[-1].strip()
ownerId = page.split("ViewPropertyOrOwners(")[1].split(")")[0].replace(",","").split()[-2].strip()
print "Identified PropertyId: ", propId, "OwnerId: ", ownerId
return propId, ownerId
imp_url = 'http://.org/appraisal/publicaccess/PropertyImpDetail.aspx?CurrPosition=%d&LastPosition=4&PropertyID=%s&PropertyOwnerID=%s&NodeID=11'
def get_rland_sf(propId,ownerId):
url_str = 'http://.org/appraisal/publicaccess/PropertyLandDetail.aspx?CurrPosition=2&LastPosition=2&PropertyID=%s&PropertyOwnerID=%s&NodeID=11'
pdata = urllib.urlopen(url_str%(propId,ownerId)).read()
sz_str = 'Size - Square Feet</td><td class="ssDetailPageData" nowrap="1">'
value = pdata.split(sz_str)[1].split("</td>")[0]
return value
def get_land_sf(propId,ownerId):
url_str = 'http://.org/appraisal/publicaccess/PropertyLandDetail.aspx?CurrPosition=1&LastPosition=2&PropertyID=%s&PropertyOwnerID=%s&NodeID=11'
pdata = urllib.urlopen(url_str%(propId,ownerId)).read()
sz_str = 'Size - Square Feet</td><td class="ssDetailPageData" nowrap="1">'
value = pdata.split(sz_str)[1].split("</td>")[0]
return value
def get_improvements(propId, ownerId, pdata):
    # count the split parts; more than one part means at least one "Imp" row is present
    x = len(pdata.split(' class="ssPropertyLink" style="color:#194274; font-weight:bolder;">Imp'))
imp = {}
value_str = '''class="ssPropertyLink" style="color:#194274; font-weight:bolder;">Imp'''
if x > 1:
value = pdata.split(value_str)[1].split("$")[1].split("</td>")[0].strip()
imp["Improvements 1"] = "$"+value
if x > 2:
# url = imp_url%(2,propId, ownerId)
# data = urllib.urlopen(url).read()
# print data.split(value_str)
# value = data.split(value_str)[1].split("</td>")[0].strip()
# value = "$"+value.replace("$","").strip()
# imp["Improvements 2"] = value
value = pdata.split(value_str)[2].split("$")[1].split("</td>")[0].strip()
imp["Improvements 2"] = "$"+value
    if x > 3:
# url = imp_url%(3,propId, ownerId)
# data = urllib.urlopen(url).read()
# value = data.split(value_str)[1].split("</td>")[0].strip()
# value = "$"+value.replace("$","").strip()
# imp["Improvements 3"] = value
value = pdata.split(value_str)[3].split("$")[1].split("</td>")[0].strip()
imp["Improvements 3"] = "$"+value
    if x > 4:
# url = imp_url%(4,propId, ownerId)
# data = urllib.urlopen(url).read()
# value = data.split(value_str)[1].split("</td>")[0].strip()
# value = "$"+value.replace("$","").strip()
# imp["Improvements 4"] = value
pass
return imp
PrimarySite = 'S1 (Primary Site)</td><td align="left" nowrap="true" style="overflow-x:hidden;" class="ssDataColumn">A1 (A1 - Residential Single Family)</td><td align="left" nowrap="true" class="ssDataColumn"></td><td align="right" nowrap="true" class="ssDataColumn">'
ResidualLand = 'S3 (Residual Land)</td><td align="left" nowrap="true" style="overflow-x:hidden;" class="ssDataColumn">A1 (A1 - Residential Single Family)</td><td align="left" nowrap="true" class="ssDataColumn"></td><td align="right" nowrap="true" class="ssDataColumn">'
def parse_main_page(main_store, property, cyear='2010'):
propId,ownerId_sql = get_propId(property,cyear)
page_data = get_main_page(propId,cyear, ownerId_sql)
#page_data = open("page.txt").read()
addr = AddrSplit(page_data)
ownerName = NameSplit(page_data)
print "Owner Name:", ownerName
g = threading.Thread(target=get_pdf_listing, args=(propId,ownerId_sql,ownerName))
g.start()
THREADS.append(g)
ownerid = OwnerIdSplit(page_data)
print "Owner Id:", ownerid
#property = PropID(page_data)
#print property
ldesc = LDescSplit(page_data)
# Pull out values from summary stuff
print "Legal Desc: ",ldesc
appraised = AppraisedSplit(page_data).strip()
landHS = LandHSSplit(page_data).strip()
improveHS = ImprovementHSSplit(page_data).strip()
hsCap = HomeSteadCapSplit(page_data).strip()
assessed = AssessedSplit(page_data).strip()
exemptions = Exemptions(page_data)
residual = '$'+page_data.split(ResidualLand)[1].split("</td></tr>")[0].split("$")[1].strip()
primary = '$'+page_data.split(PrimarySite)[1].split("</td></tr>")[0].split("$")[1].strip()
primarySiteSq = get_land_sf(propId, ownerId_sql)
residualSq = get_rland_sf(propId, ownerId_sql)
if property not in main_store:
main_store[property] = {}
if not cyear in main_store[property]:
main_store[property][cyear] = {}
# get the properties history of values
previous_years = process_history(propId, ownerId_sql, int(cyear),2006)
for year in previous_years:
main_store[property][year] = previous_years[year]
# add in the prop values for this year
main_store[property][cyear]['AppraisedValue'] = appraised
main_store[property][cyear]['Land HS'] = landHS
main_store[property][cyear]['Improvement HS'] = improveHS
main_store[property][cyear]['HSCapAdj'] = hsCap
main_store[property][cyear]['Assessed'] = assessed
main_store[property][cyear]['Primary Site Value'] = primary
main_store[property][cyear]['Primary Site SQ'] = primarySiteSq
main_store[property][cyear]['Residual Land Value'] = residual
main_store[property][cyear]['Residual Land SQ'] = residualSq
main_store[property][cyear]['Exemptions'] = exemptions
# Get various floor SQ footage
segments = get_segments(propId)
improvements = traverse_segments(1, segments, propId)
#improvements = {}
imp = get_improvements(propId, ownerId_sql, page_data)
if len(imp) > 0:
for i in imp:
main_store[property][cyear][i] = imp[i]
for year in main_store[property].keys():
main_store[property][year]['Year'] = year
main_store[property][year]['Property'] = property
main_store[property][year]['Owner Id'] = ownerid
main_store[property][year]['Address'] = addr
        main_store[property][year]['Owner Name'] = ownerName
main_store[property][year]['Property ID'] = property
main_store[property][year]['Legal Description'] = ldesc
main_store[property][year]['Primary Site SQ'] = primarySiteSq
main_store[property][year]['Residual Land SQ'] = residualSq
for segment in improvements.keys():
# do not have previous year improvements
if segment.find("Value") > -1 and year == cyear:
main_store[property][year][segment] = improvements[segment]
elif segment.find("Value") == -1:
main_store[property][year][segment] = improvements[segment]
return main_store
csv_str = '''Address,Owner Name,Owner Id,Property ID,Legal Description,Year,Improvement HS,Land HS,AppraisedValue,HSCapAdj,Assessed,First Floor SQ Footage,First Floor Value,Second Floor SQ Footage,Second Floor Value,Garage SQ Footage,Garage Value,Porch SQ Footage,Porch Value,Improvements 1,Improvements 2,Improvements 3,Primary Site SQ,Primary Site Value,Residual Land SQ,Residual Land Value'''
def write_csv(dict_of_props):
global csv_str
csv_list = csv_str.split(",")
data_strs = []
for property in dict_of_props.keys():
for year in dict_of_props[property].keys():
items = []
for item in csv_list:
if item in dict_of_props[property][year]:
items.append('"'+dict_of_props[property][year][item]+'"')
data_strs.append(",".join(items))
print "\n".join(data_strs)
return data_strs
def join_all_pending_thread(threads):
for i in threads:
if i.isAlive():
print "Waiting on thread"
            i.join()
    # list.clear() does not exist in Python 2; empty the list in place instead
    del threads[:]
return threads
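if __name__ == '__main__':
    # A minimal driver sketch, assuming the helpers and PropIds defined above;
    # the redacted hostnames in the URLs must be filled in before it can run.
    for prop in PropIds:
        try:
            parse_main_page(MAIN_HISTORY, prop, '2010')
        except Exception, e:
            print "failed on", prop, e
    join_all_pending_thread(THREADS)
    write_csv(MAIN_HISTORY)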
|
deeso/python_scrirpts
|
tax stuff.py
|
Python
|
apache-2.0
| 15,594
| 0.026549
|
from setuptools import setup
from setuptools import find_packages
setup(name='Keras-layers',
version='0.0.1',
description='Collection of useful non-standard layers for keras',
author='Atanas Mirchev',
author_email='taimir93@gmail.com',
url='https://github.com/taimir/keras-layers',
license='MIT',
packages=find_packages())
|
taimir/keras-layers
|
setup.py
|
Python
|
mit
| 367
| 0
|
nb_epoch = 100
batch_size = 64
optimizer = 'adam'
hidden_units = 3000
nb_val_samples = 462
embedding_size = 10
dropout_percentage = 0.3
embedding_GRU_size = 100
maximum_line_length = 500
samples_per_epoch = 4127 * maximum_line_length
loss = 'categorical_crossentropy'
|
Kasai-Code/Kinesis
|
settings/hyperparameters.py
|
Python
|
mit
| 268
| 0
|
#!/usr/bin/env python
# Original code was created by Nadeem Douba as part of the Canari Framework
from collections import OrderedDict
from xml.etree.cElementTree import XML
from zipfile import ZipFile
def mtgx2json(graph):
zipfile = ZipFile(graph)
graphs = filter(lambda x: x.endswith('.graphml'), zipfile.namelist())
for f in graphs:
multikeys = []
xml = XML(zipfile.open(f).read())
links = {}
for edge in xml.findall('{http://graphml.graphdrawing.org/xmlns}graph/'
'{http://graphml.graphdrawing.org/xmlns}edge'):
src = edge.get('source')
dst = edge.get('target')
if src not in links:
links[src] = dict(in_=[], out=[])
if dst not in links:
links[dst] = dict(in_=[], out=[])
links[src]['out'].append(dst)
links[dst]['in_'].append(src)
for node in xml.findall('{http://graphml.graphdrawing.org/xmlns}graph/'
'{http://graphml.graphdrawing.org/xmlns}node'):
node_id = node.get('id')
node = node.find('{http://graphml.graphdrawing.org/xmlns}data/'
'{http://maltego.paterva.com/xml/mtgx}MaltegoEntity')
record = OrderedDict({'NodeID': node_id, 'EntityType': node.get('type').strip()})
props = {'Data': {}}
for prop in node.findall('{http://maltego.paterva.com/xml/mtgx}Properties/'
'{http://maltego.paterva.com/xml/mtgx}Property'):
value = prop.find('{http://maltego.paterva.com/xml/mtgx}Value').text or ''
entity_prop = {prop.get('displayName'): value.strip()}
props['Data'].update(entity_prop)
record.update(props)
s = ' - '.join(['%s: %s' % (key, value) for (key, value) in record['Data'].items()])
record.pop('Data')
data = {'Data': s}
record.update(data)
link = {'Links': {}}
i_link = {'Incoming': links.get(node_id, {}).get('in_', 0)}
link['Links'].update(i_link)
o_link = {'Outgoing': links.get(node_id, {}).get('out', 0)}
link['Links'].update(o_link)
record.update(link)
multikeys.append(record)
return multikeys
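if __name__ == '__main__':
    # A minimal usage sketch; 'graph.mtgx' is a hypothetical local Maltego export.
    import json
    import sys
    records = mtgx2json(sys.argv[1] if len(sys.argv) > 1 else 'graph.mtgx')
    print(json.dumps(records, indent=2))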
|
SneakersInc/Pandora
|
modules/exportgraph.py
|
Python
|
mit
| 2,370
| 0.002532
|
from io import StringIO
from coaster.logger import RepeatValueIndicator, filtered_value, pprint_with_indent
def test_filtered_value():
"""Test for filtered values."""
# Doesn't touch normal key/value pairs
assert filtered_value('normal', 'value') == 'value'
assert filtered_value('also_normal', 123) == 123
# But does redact sensitive keys
assert filtered_value('password', '123pass') != '123pass'
# The returned value is an object that renders via repr and str as '[Filtered]'
assert repr(filtered_value('password', '123pass')) == '[Filtered]'
assert str(filtered_value('password', '123pass')) == '[Filtered]'
# Also works on partial matches in the keys
assert repr(filtered_value('confirm_password', '123pass')) == '[Filtered]'
# The filter uses a verbose regex. Words in the middle of the regex also work
assert repr(filtered_value('access_token', 'secret-here')) == '[Filtered]'
# Filters are case insensitive
assert repr(filtered_value('TELEGRAM_ERROR_APIKEY', 'api:key')) == '[Filtered]'
# Keys with 'token' as a word are also filtered
assert repr(filtered_value('SMS_TWILIO_TOKEN', 'api:key')) == '[Filtered]'
# Numbers that look like card numbers are filtered
assert (
filtered_value('anything', 'My number is 1234 5678 9012 3456')
== 'My number is [Filtered]'
)
# This works with any combination of spaces and dashes within the number
assert (
filtered_value('anything', 'My number is 1234 5678-90123456')
== 'My number is [Filtered]'
)
def test_pprint_with_indent():
"""Test pprint_with_indent does indentation."""
out = StringIO()
data = {
12: 34,
'confirm_password': '12345qwerty',
'credentials': ['abc', 'def'],
'key': 'value',
'nested_dict': {'password': 'not_filtered'},
'password': '12345qwerty',
}
pprint_with_indent(data, out)
assert (
out.getvalue()
== '''\
{12: 34,
'confirm_password': [Filtered],
'credentials': [Filtered],
'key': 'value',
'nested_dict': {'password': 'not_filtered'},
'password': [Filtered]}
'''
)
def test_repeat_value_indicator():
"""Test RepeatValueIndicator class."""
assert repr(RepeatValueIndicator('key')) == "<same as prior 'key'>"
assert str(RepeatValueIndicator('key')) == "<same as prior 'key'>"
|
hasgeek/coaster
|
tests/test_logger.py
|
Python
|
bsd-2-clause
| 2,411
| 0.001659
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.bitcast."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class BitcastTest(tf.test.TestCase):
def _testBitcast(self, x, datatype, shape):
with self.test_session():
tf_ans = tf.bitcast(x, datatype)
out = tf_ans.eval()
buff_after = memoryview(out).tobytes()
buff_before = memoryview(x).tobytes()
self.assertEqual(buff_before, buff_after)
self.assertEqual(tf_ans.get_shape(), shape)
self.assertEqual(tf_ans.dtype, datatype)
def testSmaller(self):
x = np.random.rand(3, 2)
datatype = tf.int8
shape = [3, 2, 8]
self._testBitcast(x, datatype, shape)
def testLarger(self):
x = np.arange(16, dtype=np.int8).reshape([4, 4])
datatype = tf.int32
shape = [4]
self._testBitcast(x, datatype, shape)
def testSameDtype(self):
x = np.random.rand(3, 4)
shape = [3, 4]
self._testBitcast(x, x.dtype, shape)
def testSameSize(self):
x = np.random.rand(3, 4)
shape = [3, 4]
self._testBitcast(x, tf.int64, shape)
def testErrors(self):
x = np.zeros([1, 1], np.int8)
datatype = tf.int32
with self.assertRaisesRegexp(ValueError, "Cannot bitcast due to shape"):
tf.bitcast(x, datatype, None)
def testEmpty(self):
x = np.ones([], np.int32)
datatype = tf.int8
shape = [4]
self._testBitcast(x, datatype, shape)
def testUnknown(self):
x = tf.placeholder(tf.float32)
datatype = tf.int8
tf.bitcast(x, datatype, None)
if __name__ == "__main__":
tf.test.main()
|
ivano666/tensorflow
|
tensorflow/python/kernel_tests/bitcast_op_test.py
|
Python
|
apache-2.0
| 2,305
| 0.007375
|