repo_name
stringlengths
5
100
ref
stringlengths
12
67
path
stringlengths
4
244
copies
stringlengths
1
8
content
stringlengths
0
1.05M
cchurch/ansible
refs/heads/devel
lib/ansible/modules/cloud/azure/azure_rm_devtestlabschedule_facts.py
7
#!/usr/bin/python # # Copyright (c) 2019 Zim Kalinowski, (@zikalino) # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: azure_rm_devtestlabschedule_facts version_added: "2.8" short_description: Get Azure Schedule facts description: - Get facts of Azure Schedule. options: resource_group: description: - The name of the resource group. required: True lab_name: description: - The name of the lab. required: True name: description: - The name of the schedule. tags: description: - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. extends_documentation_fragment: - azure author: - Zim Kalinowski (@zikalino) ''' EXAMPLES = ''' - name: Get instance of Schedule azure_rm_devtestlabschedule_facts: resource_group: myResourceGroup lab_name: myLab name: mySchedule ''' RETURN = ''' schedules: description: - A list of dictionaries containing facts for Schedule. returned: always type: complex contains: id: description: - The identifier of the artifact source. returned: always type: str sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DevTestLab/labs/myLab/sc hedules/labvmsshutdown" resource_group: description: - Name of the resource group. returned: always type: str sample: myResourceGroup lab_name: description: - Name of the lab. returned: always type: str sample: myLab name: description: - The name of the environment. returned: always type: str sample: lab_vms_shutdown time: description: - Time of the schedule. returned: always type: str sample: lab_vms_shutdown time_zone_id: description: - Time zone id. returned: always type: str sample: UTC+12 tags: description: - The tags of the resource. 
returned: always type: complex sample: "{ 'MyTag': 'MyValue' }" ''' from ansible.module_utils.azure_rm_common import AzureRMModuleBase from ansible.module_utils.common.dict_transformations import _camel_to_snake, _snake_to_camel try: from msrestazure.azure_exceptions import CloudError from azure.mgmt.devtestlabs import DevTestLabsClient from msrest.serialization import Model except ImportError: # This is handled in azure_rm_common pass class AzureRMDtlScheduleFacts(AzureRMModuleBase): def __init__(self): # define user inputs into argument self.module_arg_spec = dict( resource_group=dict( type='str', required=True ), lab_name=dict( type='str', required=True ), name=dict( type='str' ), tags=dict( type='list' ) ) # store the results of the module operation self.results = dict( changed=False ) self.mgmt_client = None self.resource_group = None self.lab_name = None self.name = None self.tags = None super(AzureRMDtlScheduleFacts, self).__init__(self.module_arg_spec, supports_tags=False) def exec_module(self, **kwargs): for key in self.module_arg_spec: setattr(self, key, kwargs[key]) self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient, base_url=self._cloud_environment.endpoints.resource_manager) if self.name: self.results['schedules'] = self.get() else: self.results['schedules'] = self.list() return self.results def get(self): response = None results = [] try: response = self.mgmt_client.schedules.get(resource_group_name=self.resource_group, lab_name=self.lab_name, name=_snake_to_camel(self.name)) self.log("Response : {0}".format(response)) except CloudError as e: self.log('Could not get facts for Schedule.') if response and self.has_tags(response.tags, self.tags): results.append(self.format_response(response)) return results def list(self): response = None results = [] try: response = self.mgmt_client.schedules.list(resource_group_name=self.resource_group, lab_name=self.lab_name) self.log("Response : {0}".format(response)) except CloudError as e: 
self.log('Could not get facts for Schedule.') if response is not None: for item in response: if self.has_tags(item.tags, self.tags): results.append(self.format_response(item)) return results def format_response(self, item): d = item.as_dict() d = { 'resource_group': self.resource_group, 'lab_name': self.lab_name, 'name': _camel_to_snake(d.get('name')), 'id': d.get('id', None), 'tags': d.get('tags', None), 'time': d.get('daily_recurrence', {}).get('time'), 'time_zone_id': d.get('time_zone_id') } return d def main(): AzureRMDtlScheduleFacts() if __name__ == '__main__': main()
casebeer/factual
refs/heads/master
factual/v3/filter_helpers.py
1
from factual.common.shared_filter_helpers import *
AutonomyLab/deep_intent
refs/heads/master
code/autoencoder_model/scripts/track.py
1
import numpy as np import cv2 file = '../gifs/vid_3.gif' cap = cv2.VideoCapture(file) # params for ShiTomasi corner detection feature_params = dict( maxCorners = 100, qualityLevel = 0.3, minDistance = 7, blockSize = 7 ) # Parameters for lucas kanade optical flow lk_params = dict( winSize = (15,15), maxLevel = 2, criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)) # Create some random colors color = np.random.randint(0,255,(100,3)) # Take first frame and find corners in it ret, old_frame = cap.read() old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY) p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params) # Create a mask image for drawing purposes mask = np.zeros_like(old_frame) count = 0 while(1): print count count = count + 1 ret,frame = cap.read() frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # calculate optical flow p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params) # Select good points good_new = p1[st==1] good_old = p0[st==1] # draw the tracks for i,(new,old) in enumerate(zip(good_new,good_old)): a,b = new.ravel() c,d = old.ravel() mask = cv2.line(mask, (a,b),(c,d), color[i].tolist(), 2) frame = cv2.circle(frame,(a,b),5,color[i].tolist(),-1) img = cv2.add(frame,mask) cv2.imshow('frame',img) cv2.imwrite('../gifs/frame' + str(count) + '.png', img) k = cv2.waitKey(30) & 0xff if count == 10: break # Now update the previous frame and previous points old_gray = frame_gray.copy() p0 = good_new.reshape(-1,1,2) # cv2.destroyAllWindows() # cap.release()
harshita-gupta/Harvard-FRSEM-Catalog-2016-17
refs/heads/master
flask/lib/python2.7/site-packages/jinja2/filters.py
329
# -*- coding: utf-8 -*- """ jinja2.filters ~~~~~~~~~~~~~~ Bundled jinja filters. :copyright: (c) 2010 by the Jinja Team. :license: BSD, see LICENSE for more details. """ import re import math from random import choice from operator import itemgetter from itertools import groupby from jinja2.utils import Markup, escape, pformat, urlize, soft_unicode, \ unicode_urlencode from jinja2.runtime import Undefined from jinja2.exceptions import FilterArgumentError from jinja2._compat import imap, string_types, text_type, iteritems _word_re = re.compile(r'\w+(?u)') def contextfilter(f): """Decorator for marking context dependent filters. The current :class:`Context` will be passed as first argument. """ f.contextfilter = True return f def evalcontextfilter(f): """Decorator for marking eval-context dependent filters. An eval context object is passed as first argument. For more information about the eval context, see :ref:`eval-context`. .. versionadded:: 2.4 """ f.evalcontextfilter = True return f def environmentfilter(f): """Decorator for marking evironment dependent filters. The current :class:`Environment` is passed to the filter as first argument. """ f.environmentfilter = True return f def make_attrgetter(environment, attribute): """Returns a callable that looks up the given attribute from a passed object with the rules of the environment. Dots are allowed to access attributes of attributes. Integer parts in paths are looked up as integers. """ if not isinstance(attribute, string_types) \ or ('.' not in attribute and not attribute.isdigit()): return lambda x: environment.getitem(x, attribute) attribute = attribute.split('.') def attrgetter(item): for part in attribute: if part.isdigit(): part = int(part) item = environment.getitem(item, part) return item return attrgetter def do_forceescape(value): """Enforce HTML escaping. 
This will probably double escape variables.""" if hasattr(value, '__html__'): value = value.__html__() return escape(text_type(value)) def do_urlencode(value): """Escape strings for use in URLs (uses UTF-8 encoding). It accepts both dictionaries and regular strings as well as pairwise iterables. .. versionadded:: 2.7 """ itemiter = None if isinstance(value, dict): itemiter = iteritems(value) elif not isinstance(value, string_types): try: itemiter = iter(value) except TypeError: pass if itemiter is None: return unicode_urlencode(value) return u'&'.join(unicode_urlencode(k) + '=' + unicode_urlencode(v, for_qs=True) for k, v in itemiter) @evalcontextfilter def do_replace(eval_ctx, s, old, new, count=None): """Return a copy of the value with all occurrences of a substring replaced with a new one. The first argument is the substring that should be replaced, the second is the replacement string. If the optional third argument ``count`` is given, only the first ``count`` occurrences are replaced: .. sourcecode:: jinja {{ "Hello World"|replace("Hello", "Goodbye") }} -> Goodbye World {{ "aaaaargh"|replace("a", "d'oh, ", 2) }} -> d'oh, d'oh, aaargh """ if count is None: count = -1 if not eval_ctx.autoescape: return text_type(s).replace(text_type(old), text_type(new), count) if hasattr(old, '__html__') or hasattr(new, '__html__') and \ not hasattr(s, '__html__'): s = escape(s) else: s = soft_unicode(s) return s.replace(soft_unicode(old), soft_unicode(new), count) def do_upper(s): """Convert a value to uppercase.""" return soft_unicode(s).upper() def do_lower(s): """Convert a value to lowercase.""" return soft_unicode(s).lower() @evalcontextfilter def do_xmlattr(_eval_ctx, d, autospace=True): """Create an SGML/XML attribute string based on the items in a dict. All values that are neither `none` nor `undefined` are automatically escaped: .. sourcecode:: html+jinja <ul{{ {'class': 'my_list', 'missing': none, 'id': 'list-%d'|format(variable)}|xmlattr }}> ... 
</ul> Results in something like this: .. sourcecode:: html <ul class="my_list" id="list-42"> ... </ul> As you can see it automatically prepends a space in front of the item if the filter returned something unless the second parameter is false. """ rv = u' '.join( u'%s="%s"' % (escape(key), escape(value)) for key, value in iteritems(d) if value is not None and not isinstance(value, Undefined) ) if autospace and rv: rv = u' ' + rv if _eval_ctx.autoescape: rv = Markup(rv) return rv def do_capitalize(s): """Capitalize a value. The first character will be uppercase, all others lowercase. """ return soft_unicode(s).capitalize() def do_title(s): """Return a titlecased version of the value. I.e. words will start with uppercase letters, all remaining characters are lowercase. """ rv = [] for item in re.compile(r'([-\s]+)(?u)').split(soft_unicode(s)): if not item: continue rv.append(item[0].upper() + item[1:].lower()) return ''.join(rv) def do_dictsort(value, case_sensitive=False, by='key'): """Sort a dict and yield (key, value) pairs. Because python dicts are unsorted you may want to use this function to order them by either key or value: .. sourcecode:: jinja {% for item in mydict|dictsort %} sort the dict by key, case insensitive {% for item in mydict|dictsort(true) %} sort the dict by key, case sensitive {% for item in mydict|dictsort(false, 'value') %} sort the dict by value, case insensitive """ if by == 'key': pos = 0 elif by == 'value': pos = 1 else: raise FilterArgumentError('You can only sort by either ' '"key" or "value"') def sort_func(item): value = item[pos] if isinstance(value, string_types) and not case_sensitive: value = value.lower() return value return sorted(value.items(), key=sort_func) @environmentfilter def do_sort(environment, value, reverse=False, case_sensitive=False, attribute=None): """Sort an iterable. Per default it sorts ascending, if you pass it true as first argument it will reverse the sorting. 
If the iterable is made of strings the third parameter can be used to control the case sensitiveness of the comparison which is disabled by default. .. sourcecode:: jinja {% for item in iterable|sort %} ... {% endfor %} It is also possible to sort by an attribute (for example to sort by the date of an object) by specifying the `attribute` parameter: .. sourcecode:: jinja {% for item in iterable|sort(attribute='date') %} ... {% endfor %} .. versionchanged:: 2.6 The `attribute` parameter was added. """ if not case_sensitive: def sort_func(item): if isinstance(item, string_types): item = item.lower() return item else: sort_func = None if attribute is not None: getter = make_attrgetter(environment, attribute) def sort_func(item, processor=sort_func or (lambda x: x)): return processor(getter(item)) return sorted(value, key=sort_func, reverse=reverse) def do_default(value, default_value=u'', boolean=False): """If the value is undefined it will return the passed default value, otherwise the value of the variable: .. sourcecode:: jinja {{ my_variable|default('my_variable is not defined') }} This will output the value of ``my_variable`` if the variable was defined, otherwise ``'my_variable is not defined'``. If you want to use default with variables that evaluate to false you have to set the second parameter to `true`: .. sourcecode:: jinja {{ ''|default('the string was empty', true) }} """ if isinstance(value, Undefined) or (boolean and not value): return default_value return value @evalcontextfilter def do_join(eval_ctx, value, d=u'', attribute=None): """Return a string which is the concatenation of the strings in the sequence. The separator between elements is an empty string per default, you can define it with the optional parameter: .. sourcecode:: jinja {{ [1, 2, 3]|join('|') }} -> 1|2|3 {{ [1, 2, 3]|join }} -> 123 It is also possible to join certain attributes of an object: .. sourcecode:: jinja {{ users|join(', ', attribute='username') }} .. 
versionadded:: 2.6 The `attribute` parameter was added. """ if attribute is not None: value = imap(make_attrgetter(eval_ctx.environment, attribute), value) # no automatic escaping? joining is a lot eaiser then if not eval_ctx.autoescape: return text_type(d).join(imap(text_type, value)) # if the delimiter doesn't have an html representation we check # if any of the items has. If yes we do a coercion to Markup if not hasattr(d, '__html__'): value = list(value) do_escape = False for idx, item in enumerate(value): if hasattr(item, '__html__'): do_escape = True else: value[idx] = text_type(item) if do_escape: d = escape(d) else: d = text_type(d) return d.join(value) # no html involved, to normal joining return soft_unicode(d).join(imap(soft_unicode, value)) def do_center(value, width=80): """Centers the value in a field of a given width.""" return text_type(value).center(width) @environmentfilter def do_first(environment, seq): """Return the first item of a sequence.""" try: return next(iter(seq)) except StopIteration: return environment.undefined('No first item, sequence was empty.') @environmentfilter def do_last(environment, seq): """Return the last item of a sequence.""" try: return next(iter(reversed(seq))) except StopIteration: return environment.undefined('No last item, sequence was empty.') @environmentfilter def do_random(environment, seq): """Return a random item from the sequence.""" try: return choice(seq) except IndexError: return environment.undefined('No random item, sequence was empty.') def do_filesizeformat(value, binary=False): """Format the value like a 'human-readable' file size (i.e. 13 kB, 4.1 MB, 102 Bytes, etc). Per default decimal prefixes are used (Mega, Giga, etc.), if the second parameter is set to `True` the binary prefixes are used (Mebi, Gibi). 
""" bytes = float(value) base = binary and 1024 or 1000 prefixes = [ (binary and 'KiB' or 'kB'), (binary and 'MiB' or 'MB'), (binary and 'GiB' or 'GB'), (binary and 'TiB' or 'TB'), (binary and 'PiB' or 'PB'), (binary and 'EiB' or 'EB'), (binary and 'ZiB' or 'ZB'), (binary and 'YiB' or 'YB') ] if bytes == 1: return '1 Byte' elif bytes < base: return '%d Bytes' % bytes else: for i, prefix in enumerate(prefixes): unit = base ** (i + 2) if bytes < unit: return '%.1f %s' % ((base * bytes / unit), prefix) return '%.1f %s' % ((base * bytes / unit), prefix) def do_pprint(value, verbose=False): """Pretty print a variable. Useful for debugging. With Jinja 1.2 onwards you can pass it a parameter. If this parameter is truthy the output will be more verbose (this requires `pretty`) """ return pformat(value, verbose=verbose) @evalcontextfilter def do_urlize(eval_ctx, value, trim_url_limit=None, nofollow=False, target=None): """Converts URLs in plain text into clickable links. If you pass the filter an additional integer it will shorten the urls to that number. Also a third argument exists that makes the urls "nofollow": .. sourcecode:: jinja {{ mytext|urlize(40, true) }} links are shortened to 40 chars and defined with rel="nofollow" If *target* is specified, the ``target`` attribute will be added to the ``<a>`` tag: .. sourcecode:: jinja {{ mytext|urlize(40, target='_blank') }} .. versionchanged:: 2.8+ The *target* parameter was added. """ rv = urlize(value, trim_url_limit, nofollow, target) if eval_ctx.autoescape: rv = Markup(rv) return rv def do_indent(s, width=4, indentfirst=False): """Return a copy of the passed string, each line indented by 4 spaces. The first line is not indented. If you want to change the number of spaces or indent the first line too you can pass additional parameters to the filter: .. sourcecode:: jinja {{ mytext|indent(2, true) }} indent by two spaces and indent the first line too. 
""" indention = u' ' * width rv = (u'\n' + indention).join(s.splitlines()) if indentfirst: rv = indention + rv return rv def do_truncate(s, length=255, killwords=False, end='...'): """Return a truncated copy of the string. The length is specified with the first parameter which defaults to ``255``. If the second parameter is ``true`` the filter will cut the text at length. Otherwise it will discard the last word. If the text was in fact truncated it will append an ellipsis sign (``"..."``). If you want a different ellipsis sign than ``"..."`` you can specify it using the third parameter. .. sourcecode:: jinja {{ "foo bar baz"|truncate(9) }} -> "foo ..." {{ "foo bar baz"|truncate(9, True) }} -> "foo ba..." """ if len(s) <= length: return s elif killwords: return s[:length - len(end)] + end result = s[:length - len(end)].rsplit(' ', 1)[0] if len(result) < length: result += ' ' return result + end @environmentfilter def do_wordwrap(environment, s, width=79, break_long_words=True, wrapstring=None): """ Return a copy of the string passed to the filter wrapped after ``79`` characters. You can override this default using the first parameter. If you set the second parameter to `false` Jinja will not split words apart if they are longer than `width`. By default, the newlines will be the default newlines for the environment, but this can be changed using the wrapstring keyword argument. .. versionadded:: 2.7 Added support for the `wrapstring` parameter. """ if not wrapstring: wrapstring = environment.newline_sequence import textwrap return wrapstring.join(textwrap.wrap(s, width=width, expand_tabs=False, replace_whitespace=False, break_long_words=break_long_words)) def do_wordcount(s): """Count the words in that string.""" return len(_word_re.findall(s)) def do_int(value, default=0, base=10): """Convert the value into an integer. If the conversion doesn't work it will return ``0``. You can override this default using the first parameter. 
You can also override the default base (10) in the second parameter, which handles input with prefixes such as 0b, 0o and 0x for bases 2, 8 and 16 respectively. """ try: return int(value, base) except (TypeError, ValueError): # this quirk is necessary so that "42.23"|int gives 42. try: return int(float(value)) except (TypeError, ValueError): return default def do_float(value, default=0.0): """Convert the value into a floating point number. If the conversion doesn't work it will return ``0.0``. You can override this default using the first parameter. """ try: return float(value) except (TypeError, ValueError): return default def do_format(value, *args, **kwargs): """ Apply python string formatting on an object: .. sourcecode:: jinja {{ "%s - %s"|format("Hello?", "Foo!") }} -> Hello? - Foo! """ if args and kwargs: raise FilterArgumentError('can\'t handle positional and keyword ' 'arguments at the same time') return soft_unicode(value) % (kwargs or args) def do_trim(value): """Strip leading and trailing whitespace.""" return soft_unicode(value).strip() def do_striptags(value): """Strip SGML/XML tags and replace adjacent whitespace by one space. """ if hasattr(value, '__html__'): value = value.__html__() return Markup(text_type(value)).striptags() def do_slice(value, slices, fill_with=None): """Slice an iterator and return a list of lists containing those items. Useful if you want to create a div containing three ul tags that represent columns: .. sourcecode:: html+jinja <div class="columwrapper"> {%- for column in items|slice(3) %} <ul class="column-{{ loop.index }}"> {%- for item in column %} <li>{{ item }}</li> {%- endfor %} </ul> {%- endfor %} </div> If you pass it a second argument it's used to fill missing values on the last iteration. 
""" seq = list(value) length = len(seq) items_per_slice = length // slices slices_with_extra = length % slices offset = 0 for slice_number in range(slices): start = offset + slice_number * items_per_slice if slice_number < slices_with_extra: offset += 1 end = offset + (slice_number + 1) * items_per_slice tmp = seq[start:end] if fill_with is not None and slice_number >= slices_with_extra: tmp.append(fill_with) yield tmp def do_batch(value, linecount, fill_with=None): """ A filter that batches items. It works pretty much like `slice` just the other way round. It returns a list of lists with the given number of items. If you provide a second parameter this is used to fill up missing items. See this example: .. sourcecode:: html+jinja <table> {%- for row in items|batch(3, '&nbsp;') %} <tr> {%- for column in row %} <td>{{ column }}</td> {%- endfor %} </tr> {%- endfor %} </table> """ tmp = [] for item in value: if len(tmp) == linecount: yield tmp tmp = [] tmp.append(item) if tmp: if fill_with is not None and len(tmp) < linecount: tmp += [fill_with] * (linecount - len(tmp)) yield tmp def do_round(value, precision=0, method='common'): """Round the number to a given precision. The first parameter specifies the precision (default is ``0``), the second the rounding method: - ``'common'`` rounds either up or down - ``'ceil'`` always rounds up - ``'floor'`` always rounds down If you don't specify a method ``'common'`` is used. .. sourcecode:: jinja {{ 42.55|round }} -> 43.0 {{ 42.55|round(1, 'floor') }} -> 42.5 Note that even if rounded to 0 precision, a float is returned. If you need a real integer, pipe it through `int`: .. 
sourcecode:: jinja {{ 42.55|round|int }} -> 43 """ if not method in ('common', 'ceil', 'floor'): raise FilterArgumentError('method must be common, ceil or floor') if method == 'common': return round(value, precision) func = getattr(math, method) return func(value * (10 ** precision)) / (10 ** precision) @environmentfilter def do_groupby(environment, value, attribute): """Group a sequence of objects by a common attribute. If you for example have a list of dicts or objects that represent persons with `gender`, `first_name` and `last_name` attributes and you want to group all users by genders you can do something like the following snippet: .. sourcecode:: html+jinja <ul> {% for group in persons|groupby('gender') %} <li>{{ group.grouper }}<ul> {% for person in group.list %} <li>{{ person.first_name }} {{ person.last_name }}</li> {% endfor %}</ul></li> {% endfor %} </ul> Additionally it's possible to use tuple unpacking for the grouper and list: .. sourcecode:: html+jinja <ul> {% for grouper, list in persons|groupby('gender') %} ... {% endfor %} </ul> As you can see the item we're grouping by is stored in the `grouper` attribute and the `list` contains all the objects that have this grouper in common. .. versionchanged:: 2.6 It's now possible to use dotted notation to group by the child attribute of another attribute. """ expr = make_attrgetter(environment, attribute) return sorted(map(_GroupTuple, groupby(sorted(value, key=expr), expr))) class _GroupTuple(tuple): __slots__ = () grouper = property(itemgetter(0)) list = property(itemgetter(1)) def __new__(cls, xxx_todo_changeme): (key, value) = xxx_todo_changeme return tuple.__new__(cls, (key, list(value))) @environmentfilter def do_sum(environment, iterable, attribute=None, start=0): """Returns the sum of a sequence of numbers plus the value of parameter 'start' (which defaults to 0). When the sequence is empty it returns start. It is also possible to sum up only certain attributes: .. 
sourcecode:: jinja Total: {{ items|sum(attribute='price') }} .. versionchanged:: 2.6 The `attribute` parameter was added to allow suming up over attributes. Also the `start` parameter was moved on to the right. """ if attribute is not None: iterable = imap(make_attrgetter(environment, attribute), iterable) return sum(iterable, start) def do_list(value): """Convert the value into a list. If it was a string the returned list will be a list of characters. """ return list(value) def do_mark_safe(value): """Mark the value as safe which means that in an environment with automatic escaping enabled this variable will not be escaped. """ return Markup(value) def do_mark_unsafe(value): """Mark a value as unsafe. This is the reverse operation for :func:`safe`.""" return text_type(value) def do_reverse(value): """Reverse the object or return an iterator that iterates over it the other way round. """ if isinstance(value, string_types): return value[::-1] try: return reversed(value) except TypeError: try: rv = list(value) rv.reverse() return rv except TypeError: raise FilterArgumentError('argument must be iterable') @environmentfilter def do_attr(environment, obj, name): """Get an attribute of an object. ``foo|attr("bar")`` works like ``foo.bar`` just that always an attribute is returned and items are not looked up. See :ref:`Notes on subscriptions <notes-on-subscriptions>` for more details. """ try: name = str(name) except UnicodeError: pass else: try: value = getattr(obj, name) except AttributeError: pass else: if environment.sandboxed and not \ environment.is_safe_attribute(obj, name, value): return environment.unsafe_undefined(obj, name) return value return environment.undefined(obj=obj, name=name) @contextfilter def do_map(*args, **kwargs): """Applies a filter on a sequence of objects or looks up an attribute. This is useful when dealing with lists of objects but you are really only interested in a certain value of it. The basic usage is mapping on an attribute. 
Imagine you have a list of users but you are only interested in a list of usernames: .. sourcecode:: jinja Users on this page: {{ users|map(attribute='username')|join(', ') }} Alternatively you can let it invoke a filter by passing the name of the filter and the arguments afterwards. A good example would be applying a text conversion filter on a sequence: .. sourcecode:: jinja Users on this page: {{ titles|map('lower')|join(', ') }} .. versionadded:: 2.7 """ context = args[0] seq = args[1] if len(args) == 2 and 'attribute' in kwargs: attribute = kwargs.pop('attribute') if kwargs: raise FilterArgumentError('Unexpected keyword argument %r' % next(iter(kwargs))) func = make_attrgetter(context.environment, attribute) else: try: name = args[2] args = args[3:] except LookupError: raise FilterArgumentError('map requires a filter argument') func = lambda item: context.environment.call_filter( name, item, args, kwargs, context=context) if seq: for item in seq: yield func(item) @contextfilter def do_select(*args, **kwargs): """Filters a sequence of objects by applying a test to the object and only selecting the ones with the test succeeding. Example usage: .. sourcecode:: jinja {{ numbers|select("odd") }} {{ numbers|select("odd") }} .. versionadded:: 2.7 """ return _select_or_reject(args, kwargs, lambda x: x, False) @contextfilter def do_reject(*args, **kwargs): """Filters a sequence of objects by applying a test to the object and rejecting the ones with the test succeeding. Example usage: .. sourcecode:: jinja {{ numbers|reject("odd") }} .. versionadded:: 2.7 """ return _select_or_reject(args, kwargs, lambda x: not x, False) @contextfilter def do_selectattr(*args, **kwargs): """Filters a sequence of objects by applying a test to an attribute of an object and only selecting the ones with the test succeeding. Example usage: .. sourcecode:: jinja {{ users|selectattr("is_active") }} {{ users|selectattr("email", "none") }} .. 
versionadded:: 2.7 """ return _select_or_reject(args, kwargs, lambda x: x, True) @contextfilter def do_rejectattr(*args, **kwargs): """Filters a sequence of objects by applying a test to an attribute of an object or the attribute and rejecting the ones with the test succeeding. .. sourcecode:: jinja {{ users|rejectattr("is_active") }} {{ users|rejectattr("email", "none") }} .. versionadded:: 2.7 """ return _select_or_reject(args, kwargs, lambda x: not x, True) def _select_or_reject(args, kwargs, modfunc, lookup_attr): context = args[0] seq = args[1] if lookup_attr: try: attr = args[2] except LookupError: raise FilterArgumentError('Missing parameter for attribute name') transfunc = make_attrgetter(context.environment, attr) off = 1 else: off = 0 transfunc = lambda x: x try: name = args[2 + off] args = args[3 + off:] func = lambda item: context.environment.call_test( name, item, args, kwargs) except LookupError: func = bool if seq: for item in seq: if modfunc(func(transfunc(item))): yield item FILTERS = { 'abs': abs, 'attr': do_attr, 'batch': do_batch, 'capitalize': do_capitalize, 'center': do_center, 'count': len, 'd': do_default, 'default': do_default, 'dictsort': do_dictsort, 'e': escape, 'escape': escape, 'filesizeformat': do_filesizeformat, 'first': do_first, 'float': do_float, 'forceescape': do_forceescape, 'format': do_format, 'groupby': do_groupby, 'indent': do_indent, 'int': do_int, 'join': do_join, 'last': do_last, 'length': len, 'list': do_list, 'lower': do_lower, 'map': do_map, 'pprint': do_pprint, 'random': do_random, 'reject': do_reject, 'rejectattr': do_rejectattr, 'replace': do_replace, 'reverse': do_reverse, 'round': do_round, 'safe': do_mark_safe, 'select': do_select, 'selectattr': do_selectattr, 'slice': do_slice, 'sort': do_sort, 'string': soft_unicode, 'striptags': do_striptags, 'sum': do_sum, 'title': do_title, 'trim': do_trim, 'truncate': do_truncate, 'upper': do_upper, 'urlencode': do_urlencode, 'urlize': do_urlize, 'wordcount': do_wordcount, 
'wordwrap': do_wordwrap, 'xmlattr': do_xmlattr, }
dabelenda/amm
refs/heads/master
src/auth/ldap.py
2
"""(c) All rights reserved. ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE, Switzerland, VPSI, 2017""" import re import ldap3 from config.settings import base class Authenticator: """ Class to authenticate users using LDAP(S) """ def __init__(self): self.ldap_server = base.get_config('LDAP_SERVER') self.protocol = 'ldaps' if base.get_config('LDAP_USE_SSL') == 'true' else 'ldap' self.use_ssl = True if base.get_config('LDAP_USE_SSL') == 'true' else False self.uri = self.protocol + '://' + self.ldap_server self.dn = base.get_config('LDAP_BASE_DN') self.user_attr = base.get_config('LDAP_USER_SEARCH_ATTR') def get_user_dn(self, username): server = ldap3.Server('ldap://' + self.ldap_server) connection = ldap3.Connection(server) connection.open() connection.search( search_base=self.dn, search_filter='(' + self.user_attr + '=' + username + ')' ) return connection.response[0]['dn'] def authenticate(self, username, password): """ Authenticate the user with a bind on the LDAP server """ if username is None or password is None: return False # check the username if not re.match("^[A-Za-z0-9_-]*$", username): return False user_dn = self.get_user_dn(username) server = ldap3.Server( self.uri, use_ssl=self.use_ssl ) connection = ldap3.Connection(server, user=user_dn, password=password) return connection.bind()
zdary/intellij-community
refs/heads/master
python/testData/intentions/PyConvertTypeCommentToVariableAnnotationIntentionTest/assignmentWithComplexUnpacking_after.py
31
y: Optional[Union[None, Any]] x: Callable[..., int] z: Any [y, (x, (z))] = undefined()
ChronoMonochrome/android_kernel_lenovo_msm8916
refs/heads/master
tools/perf/scripts/python/netdev-times.py
11271
# Display a process of packets and processed time. # It helps us to investigate networking or network device. # # options # tx: show only tx chart # rx: show only rx chart # dev=: show only thing related to specified device # debug: work with debug mode. It shows buffer status. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * all_event_list = []; # insert all tracepoint event related with this script irq_dic = {}; # key is cpu and value is a list which stacks irqs # which raise NET_RX softirq net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry # and a list which stacks receive receive_hunk_list = []; # a list which include a sequence of receive events rx_skb_list = []; # received packet list for matching # skb_copy_datagram_iovec buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and # tx_xmit_list of_count_rx_skb_list = 0; # overflow count tx_queue_list = []; # list of packets which pass through dev_queue_xmit of_count_tx_queue_list = 0; # overflow count tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit of_count_tx_xmit_list = 0; # overflow count tx_free_list = []; # list of packets which is freed # options show_tx = 0; show_rx = 0; dev = 0; # store a name of device specified by option "dev=" debug = 0; # indices of event_info tuple EINFO_IDX_NAME= 0 EINFO_IDX_CONTEXT=1 EINFO_IDX_CPU= 2 EINFO_IDX_TIME= 3 EINFO_IDX_PID= 4 EINFO_IDX_COMM= 5 # Calculate a time interval(msec) from src(nsec) to dst(nsec) def diff_msec(src, dst): return (dst - src) / 1000000.0 # Display a process of transmitting a packet def print_transmit(hunk): if dev != 0 and hunk['dev'].find(dev) < 0: return print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \ (hunk['dev'], hunk['len'], nsecs_secs(hunk['queue_t']), nsecs_nsecs(hunk['queue_t'])/1000, diff_msec(hunk['queue_t'], hunk['xmit_t']), 
diff_msec(hunk['xmit_t'], hunk['free_t'])) # Format for displaying rx packet processing PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)" PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)" PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)" PF_JOINT= " |" PF_WJOINT= " | |" PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)" PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)" PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)" PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)" PF_CONS_SKB= " | consume_skb(+%.3fmsec)" # Display a process of received packets and interrputs associated with # a NET_RX softirq def print_receive(hunk): show_hunk = 0 irq_list = hunk['irq_list'] cpu = irq_list[0]['cpu'] base_t = irq_list[0]['irq_ent_t'] # check if this hunk should be showed if dev != 0: for i in range(len(irq_list)): if irq_list[i]['name'].find(dev) >= 0: show_hunk = 1 break else: show_hunk = 1 if show_hunk == 0: return print "%d.%06dsec cpu=%d" % \ (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu) for i in range(len(irq_list)): print PF_IRQ_ENTRY % \ (diff_msec(base_t, irq_list[i]['irq_ent_t']), irq_list[i]['irq'], irq_list[i]['name']) print PF_JOINT irq_event_list = irq_list[i]['event_list'] for j in range(len(irq_event_list)): irq_event = irq_event_list[j] if irq_event['event'] == 'netif_rx': print PF_NET_RX % \ (diff_msec(base_t, irq_event['time']), irq_event['skbaddr']) print PF_JOINT print PF_SOFT_ENTRY % \ diff_msec(base_t, hunk['sirq_ent_t']) print PF_JOINT event_list = hunk['event_list'] for i in range(len(event_list)): event = event_list[i] if event['event_name'] == 'napi_poll': print PF_NAPI_POLL % \ (diff_msec(base_t, event['event_t']), event['dev']) if i == len(event_list) - 1: print "" else: print PF_JOINT else: print PF_NET_RECV % \ (diff_msec(base_t, event['event_t']), event['skbaddr'], event['len']) if 'comm' in event.keys(): print PF_WJOINT print PF_CPY_DGRAM % \ (diff_msec(base_t, event['comm_t']), event['pid'], event['comm']) elif 'handle' in event.keys(): 
print PF_WJOINT if event['handle'] == "kfree_skb": print PF_KFREE_SKB % \ (diff_msec(base_t, event['comm_t']), event['location']) elif event['handle'] == "consume_skb": print PF_CONS_SKB % \ diff_msec(base_t, event['comm_t']) print PF_JOINT def trace_begin(): global show_tx global show_rx global dev global debug for i in range(len(sys.argv)): if i == 0: continue arg = sys.argv[i] if arg == 'tx': show_tx = 1 elif arg =='rx': show_rx = 1 elif arg.find('dev=',0, 4) >= 0: dev = arg[4:] elif arg == 'debug': debug = 1 if show_tx == 0 and show_rx == 0: show_tx = 1 show_rx = 1 def trace_end(): # order all events in time all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME], b[EINFO_IDX_TIME])) # process all events for i in range(len(all_event_list)): event_info = all_event_list[i] name = event_info[EINFO_IDX_NAME] if name == 'irq__softirq_exit': handle_irq_softirq_exit(event_info) elif name == 'irq__softirq_entry': handle_irq_softirq_entry(event_info) elif name == 'irq__softirq_raise': handle_irq_softirq_raise(event_info) elif name == 'irq__irq_handler_entry': handle_irq_handler_entry(event_info) elif name == 'irq__irq_handler_exit': handle_irq_handler_exit(event_info) elif name == 'napi__napi_poll': handle_napi_poll(event_info) elif name == 'net__netif_receive_skb': handle_netif_receive_skb(event_info) elif name == 'net__netif_rx': handle_netif_rx(event_info) elif name == 'skb__skb_copy_datagram_iovec': handle_skb_copy_datagram_iovec(event_info) elif name == 'net__net_dev_queue': handle_net_dev_queue(event_info) elif name == 'net__net_dev_xmit': handle_net_dev_xmit(event_info) elif name == 'skb__kfree_skb': handle_kfree_skb(event_info) elif name == 'skb__consume_skb': handle_consume_skb(event_info) # display receive hunks if show_rx: for i in range(len(receive_hunk_list)): print_receive(receive_hunk_list[i]) # display transmit hunks if show_tx: print " dev len Qdisc " \ " netdevice free" for i in range(len(tx_free_list)): print_transmit(tx_free_list[i]) if debug: print 
"debug buffer status" print "----------------------------" print "xmit Qdisc:remain:%d overflow:%d" % \ (len(tx_queue_list), of_count_tx_queue_list) print "xmit netdevice:remain:%d overflow:%d" % \ (len(tx_xmit_list), of_count_tx_xmit_list) print "receive:remain:%d overflow:%d" % \ (len(rx_skb_list), of_count_rx_skb_list) # called from perf, when it finds a correspoinding event def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm, irq, irq_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, irq_name) all_event_list.append(event_info) def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret) all_event_list.append(event_info) def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, napi, dev_name) all_event_list.append(event_info) def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, dev_name): event_info = 
(name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, rc, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, rc ,dev_name) all_event_list.append(event_info) def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, protocol, location): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, protocol, location) all_event_list.append(event_info) def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr) all_event_list.append(event_info) def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen) all_event_list.append(event_info) def handle_irq_handler_entry(event_info): (name, context, cpu, time, pid, comm, irq, irq_name) = event_info if cpu not in irq_dic.keys(): irq_dic[cpu] = [] irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time} irq_dic[cpu].append(irq_record) def handle_irq_handler_exit(event_info): (name, context, cpu, time, pid, comm, irq, ret) = event_info if cpu not in irq_dic.keys(): return irq_record = irq_dic[cpu].pop() if irq != irq_record['irq']: return irq_record.update({'irq_ext_t':time}) # if an irq doesn't include NET_RX softirq, drop. 
if 'event_list' in irq_record.keys(): irq_dic[cpu].append(irq_record) def handle_irq_softirq_raise(event_info): (name, context, cpu, time, pid, comm, vec) = event_info if cpu not in irq_dic.keys() \ or len(irq_dic[cpu]) == 0: return irq_record = irq_dic[cpu].pop() if 'event_list' in irq_record.keys(): irq_event_list = irq_record['event_list'] else: irq_event_list = [] irq_event_list.append({'time':time, 'event':'sirq_raise'}) irq_record.update({'event_list':irq_event_list}) irq_dic[cpu].append(irq_record) def handle_irq_softirq_entry(event_info): (name, context, cpu, time, pid, comm, vec) = event_info net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]} def handle_irq_softirq_exit(event_info): (name, context, cpu, time, pid, comm, vec) = event_info irq_list = [] event_list = 0 if cpu in irq_dic.keys(): irq_list = irq_dic[cpu] del irq_dic[cpu] if cpu in net_rx_dic.keys(): sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t'] event_list = net_rx_dic[cpu]['event_list'] del net_rx_dic[cpu] if irq_list == [] or event_list == 0: return rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time, 'irq_list':irq_list, 'event_list':event_list} # merge information realted to a NET_RX softirq receive_hunk_list.append(rec_data) def handle_napi_poll(event_info): (name, context, cpu, time, pid, comm, napi, dev_name) = event_info if cpu in net_rx_dic.keys(): event_list = net_rx_dic[cpu]['event_list'] rec_data = {'event_name':'napi_poll', 'dev':dev_name, 'event_t':time} event_list.append(rec_data) def handle_netif_rx(event_info): (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info if cpu not in irq_dic.keys() \ or len(irq_dic[cpu]) == 0: return irq_record = irq_dic[cpu].pop() if 'event_list' in irq_record.keys(): irq_event_list = irq_record['event_list'] else: irq_event_list = [] irq_event_list.append({'time':time, 'event':'netif_rx', 'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name}) irq_record.update({'event_list':irq_event_list}) 
irq_dic[cpu].append(irq_record) def handle_netif_receive_skb(event_info): global of_count_rx_skb_list (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info if cpu in net_rx_dic.keys(): rec_data = {'event_name':'netif_receive_skb', 'event_t':time, 'skbaddr':skbaddr, 'len':skblen} event_list = net_rx_dic[cpu]['event_list'] event_list.append(rec_data) rx_skb_list.insert(0, rec_data) if len(rx_skb_list) > buffer_budget: rx_skb_list.pop() of_count_rx_skb_list += 1 def handle_net_dev_queue(event_info): global of_count_tx_queue_list (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time} tx_queue_list.insert(0, skb) if len(tx_queue_list) > buffer_budget: tx_queue_list.pop() of_count_tx_queue_list += 1 def handle_net_dev_xmit(event_info): global of_count_tx_xmit_list (name, context, cpu, time, pid, comm, skbaddr, skblen, rc, dev_name) = event_info if rc == 0: # NETDEV_TX_OK for i in range(len(tx_queue_list)): skb = tx_queue_list[i] if skb['skbaddr'] == skbaddr: skb['xmit_t'] = time tx_xmit_list.insert(0, skb) del tx_queue_list[i] if len(tx_xmit_list) > buffer_budget: tx_xmit_list.pop() of_count_tx_xmit_list += 1 return def handle_kfree_skb(event_info): (name, context, cpu, time, pid, comm, skbaddr, protocol, location) = event_info for i in range(len(tx_queue_list)): skb = tx_queue_list[i] if skb['skbaddr'] == skbaddr: del tx_queue_list[i] return for i in range(len(tx_xmit_list)): skb = tx_xmit_list[i] if skb['skbaddr'] == skbaddr: skb['free_t'] = time tx_free_list.append(skb) del tx_xmit_list[i] return for i in range(len(rx_skb_list)): rec_data = rx_skb_list[i] if rec_data['skbaddr'] == skbaddr: rec_data.update({'handle':"kfree_skb", 'comm':comm, 'pid':pid, 'comm_t':time}) del rx_skb_list[i] return def handle_consume_skb(event_info): (name, context, cpu, time, pid, comm, skbaddr) = event_info for i in range(len(tx_xmit_list)): skb = tx_xmit_list[i] if 
skb['skbaddr'] == skbaddr: skb['free_t'] = time tx_free_list.append(skb) del tx_xmit_list[i] return def handle_skb_copy_datagram_iovec(event_info): (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info for i in range(len(rx_skb_list)): rec_data = rx_skb_list[i] if skbaddr == rec_data['skbaddr']: rec_data.update({'handle':"skb_copy_datagram_iovec", 'comm':comm, 'pid':pid, 'comm_t':time}) del rx_skb_list[i] return
johnraz/django-rest-framework
refs/heads/master
rest_framework/exceptions.py
8
""" Handled exceptions raised by REST framework. In addition Django's built in 403 and 404 exceptions are handled. (`django.http.Http404` and `django.core.exceptions.PermissionDenied`) """ from __future__ import unicode_literals import math from django.utils import six from django.utils.encoding import force_text from django.utils.translation import ugettext_lazy as _ from django.utils.translation import ungettext from rest_framework import status from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList def _force_text_recursive(data): """ Descend into a nested data structure, forcing any lazy translation strings into plain text. """ if isinstance(data, list): ret = [ _force_text_recursive(item) for item in data ] if isinstance(data, ReturnList): return ReturnList(ret, serializer=data.serializer) return ret elif isinstance(data, dict): ret = { key: _force_text_recursive(value) for key, value in data.items() } if isinstance(data, ReturnDict): return ReturnDict(ret, serializer=data.serializer) return ret return force_text(data) class APIException(Exception): """ Base class for REST framework exceptions. Subclasses should provide `.status_code` and `.default_detail` properties. """ status_code = status.HTTP_500_INTERNAL_SERVER_ERROR default_detail = _('A server error occurred.') def __init__(self, detail=None): if detail is not None: self.detail = force_text(detail) else: self.detail = force_text(self.default_detail) def __str__(self): return self.detail # The recommended style for using `ValidationError` is to keep it namespaced # under `serializers`, in order to minimize potential confusion with Django's # built in `ValidationError`. For example: # # from rest_framework import serializers # raise serializers.ValidationError('Value was invalid') class ValidationError(APIException): status_code = status.HTTP_400_BAD_REQUEST def __init__(self, detail): # For validation errors the 'detail' key is always required. 
# The details should always be coerced to a list if not already. if not isinstance(detail, dict) and not isinstance(detail, list): detail = [detail] self.detail = _force_text_recursive(detail) def __str__(self): return six.text_type(self.detail) class ParseError(APIException): status_code = status.HTTP_400_BAD_REQUEST default_detail = _('Malformed request.') class AuthenticationFailed(APIException): status_code = status.HTTP_401_UNAUTHORIZED default_detail = _('Incorrect authentication credentials.') class NotAuthenticated(APIException): status_code = status.HTTP_401_UNAUTHORIZED default_detail = _('Authentication credentials were not provided.') class PermissionDenied(APIException): status_code = status.HTTP_403_FORBIDDEN default_detail = _('You do not have permission to perform this action.') class NotFound(APIException): status_code = status.HTTP_404_NOT_FOUND default_detail = _('Not found.') class MethodNotAllowed(APIException): status_code = status.HTTP_405_METHOD_NOT_ALLOWED default_detail = _('Method "{method}" not allowed.') def __init__(self, method, detail=None): if detail is not None: self.detail = force_text(detail) else: self.detail = force_text(self.default_detail).format(method=method) class NotAcceptable(APIException): status_code = status.HTTP_406_NOT_ACCEPTABLE default_detail = _('Could not satisfy the request Accept header.') def __init__(self, detail=None, available_renderers=None): if detail is not None: self.detail = force_text(detail) else: self.detail = force_text(self.default_detail) self.available_renderers = available_renderers class UnsupportedMediaType(APIException): status_code = status.HTTP_415_UNSUPPORTED_MEDIA_TYPE default_detail = _('Unsupported media type "{media_type}" in request.') def __init__(self, media_type, detail=None): if detail is not None: self.detail = force_text(detail) else: self.detail = force_text(self.default_detail).format( media_type=media_type ) class Throttled(APIException): status_code = 
status.HTTP_429_TOO_MANY_REQUESTS default_detail = _('Request was throttled.') extra_detail_singular = 'Expected available in {wait} second.' extra_detail_plural = 'Expected available in {wait} seconds.' def __init__(self, wait=None, detail=None): if detail is not None: self.detail = force_text(detail) else: self.detail = force_text(self.default_detail) if wait is None: self.wait = None else: self.wait = math.ceil(wait) self.detail += ' ' + force_text(ungettext( self.extra_detail_singular.format(wait=self.wait), self.extra_detail_plural.format(wait=self.wait), self.wait ))
capchu/TextRPGOnline
refs/heads/master
rpgonline/env/lib/python2.7/os.py
4
/usr/lib/python2.7/os.py
jumpstarter-io/cinder
refs/heads/master
cinder/api/contrib/volume_mig_status_attribute.py
14
# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api import xmlutil authorize = extensions.soft_extension_authorizer('volume', 'volume_mig_status_attribute') class VolumeMigStatusAttributeController(wsgi.Controller): def _add_volume_mig_status_attribute(self, req, context, resp_volume): db_volume = req.get_db_volume(resp_volume['id']) key = "%s:migstat" % Volume_mig_status_attribute.alias resp_volume[key] = db_volume['migration_status'] key = "%s:name_id" % Volume_mig_status_attribute.alias resp_volume[key] = db_volume['_name_id'] @wsgi.extends def show(self, req, resp_obj, id): context = req.environ['cinder.context'] if authorize(context): resp_obj.attach(xml=VolumeMigStatusAttributeTemplate()) self._add_volume_mig_status_attribute(req, context, resp_obj.obj['volume']) @wsgi.extends def detail(self, req, resp_obj): context = req.environ['cinder.context'] if authorize(context): resp_obj.attach(xml=VolumeListMigStatusAttributeTemplate()) for vol in list(resp_obj.obj['volumes']): self._add_volume_mig_status_attribute(req, context, vol) class Volume_mig_status_attribute(extensions.ExtensionDescriptor): """Expose migration_status as an attribute of a volume.""" name = "VolumeMigStatusAttribute" alias = "os-vol-mig-status-attr" namespace = ("http://docs.openstack.org/volume/ext/" "volume_mig_status_attribute/api/v1") updated = "2013-08-08T00:00:00+00:00" def 
get_controller_extensions(self): controller = VolumeMigStatusAttributeController() extension = extensions.ControllerExtension(self, 'volumes', controller) return [extension] def make_volume(elem): elem.set('{%s}migstat' % Volume_mig_status_attribute.namespace, '%s:migstat' % Volume_mig_status_attribute.alias) elem.set('{%s}name_id' % Volume_mig_status_attribute.namespace, '%s:name_id' % Volume_mig_status_attribute.alias) class VolumeMigStatusAttributeTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volume', selector='volume') make_volume(root) alias = Volume_mig_status_attribute.alias namespace = Volume_mig_status_attribute.namespace return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) class VolumeListMigStatusAttributeTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volumes') elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes') make_volume(elem) alias = Volume_mig_status_attribute.alias namespace = Volume_mig_status_attribute.namespace return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})
djangorussia/django-1.3-branch
refs/heads/master
django/conf/locale/el/formats.py
433
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'd E Y' TIME_FORMAT = 'g:i:s A' # DATETIME_FORMAT = YEAR_MONTH_FORMAT = 'F Y' MONTH_DAY_FORMAT = 'j F' SHORT_DATE_FORMAT = 'd M Y' # SHORT_DATETIME_FORMAT = # FIRST_DAY_OF_WEEK = # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior # DATE_INPUT_FORMATS = # TIME_INPUT_FORMATS = # DATETIME_INPUT_FORMATS = DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '.' # NUMBER_GROUPING =
flyingk/InertialNav
refs/heads/master
code/plot_mag.py
6
#!/bin/python import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.cbook as cbook import numpy as np import math data = np.genfromtxt('MagFuse.txt', delimiter=' ', skip_header=1, skip_footer=1, names=['time', 'IMX', 'VMX', 'IMY', 'VMY', 'IMZ', 'VMZ']) fig = plt.figure() ax1 = fig.add_subplot(311) SMX = pow(data['VMX'],0.5) ax1.set_title("Magnetometer Innovations") ax1.set_xlabel('time (s)') ax1.set_ylabel('X') #ax1.set_ylim([-0.0025,0.0025]) ax1.plot(data['time'], data['IMX'], color='b', label='Mag X') ax1.plot(data['time'], SMX, color='r') ax1.plot(data['time'], -SMX, color='r') ax2 = fig.add_subplot(312) SMY = pow(data['VMY'],0.5) ax2.set_xlabel('time (s)') ax2.set_ylabel('Y') #ax2.set_ylim([-0.0025,0.0025]) ax2.plot(data['time'], data['IMY'], color='b', label='Mag Y') ax2.plot(data['time'], SMY, color='r') ax2.plot(data['time'], -SMY, color='r') ax3 = fig.add_subplot(313) SMZ = pow(data['VMZ'],0.5) ax3.set_xlabel('time (s)') ax2.set_ylabel('Y') #ax3.set_ylim([-0.0025,0.0025]) ax3.plot(data['time'], data['IMZ'], color='b', label='Mag Z') ax3.plot(data['time'], SMZ, color='r') ax3.plot(data['time'], -SMZ, color='r') plt.show()
calfonso/ansible
refs/heads/devel
lib/ansible/modules/cloud/cloudstack/cs_network_offering.py
38
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (c) 2017, David Passante (@dpassante) # Copyright (c) 2017, René Moser <mail@renemoser.net> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: cs_network_offering short_description: Manages network offerings on Apache CloudStack based clouds. description: - Create, update, enable, disable and remove network offerings. version_added: '2.5' author: "David Passante (@dpassante)" options: state: description: - State of the network offering. choices: [ enabled, present, disabled, absent] default: present display_text: description: - Display text of the network offerings. guest_ip_type: description: - Guest type of the network offering. choices: [ Shared, Isolated ] name: description: - The name of the network offering. required: true supported_services: description: - Services supported by the network offering. - One or more of the choices. choices: [ Dns, PortForwarding, Dhcp, SourceNat, UserData, Firewall, StaticNat, Vpn, Lb ] aliases: [ supported_service ] traffic_type: description: - The traffic type for the network offering. default: Guest availability: description: - The availability of network offering. Default value is Optional conserve_mode: description: - Whether the network offering has IP conserve mode enabled. type: bool details: description: - Network offering details in key/value pairs. - with service provider as a value choices: [ internallbprovider, publiclbprovider ] egress_default_policy: description: - Whether the default egress policy is allow or to deny. choices: [ allow, deny ] persistent: description: - True if network offering supports persistent networks - defaulted to false if not specified keepalive_enabled: description: - If true keepalive will be turned on in the loadbalancer. 
- At the time of writing this has only an effect on haproxy. - the mode http and httpclose options are unset in the haproxy conf file. type: bool max_connections: description: - Maximum number of concurrent connections supported by the network offering. network_rate: description: - Data transfer rate in megabits per second allowed. service_capabilities: description: - Desired service capabilities as part of network offering. aliases: [ service_capability ] service_offering: description: - The service offering name or ID used by virtual router provider. service_provider: description: - Provider to service mapping. - If not specified, the provider for the service will be mapped to the default provider on the physical network. aliases: [service_provider] specify_ip_ranges: description: - Wheter the network offering supports specifying IP ranges. - Defaulted to C(no) by the API if not specified. type: bool specify_vlan: description: - Whether the network offering supports vlans or not. type: bool extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' - name: Create a network offering and enable it local_action: module: cs_network_offering name: my_network_offering display_text: network offering description state: enabled guest_ip_type: Isolated supported_services: [ Dns, PortForwarding, Dhcp, SourceNat, UserData, Firewall, StaticNat, Vpn, Lb ] service_providers: - { service: 'dns', provider: 'virtualrouter' } - { service: 'dhcp', provider: 'virtualrouter' } - name: Remove a network offering local_action: module: cs_network_offering name: my_network_offering state: absent ''' RETURN = ''' --- id: description: UUID of the network offering. returned: success type: string sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f name: description: The name of the network offering. returned: success type: string sample: MyCustomNetworkOffering display_text: description: The display text of the network offering. 
returned: success type: string sample: My network offering state: description: The state of the network offering. returned: success type: string sample: Enabled guest_ip_type: description: Guest type of the network offering. returned: success type: string sample: Isolated availability: description: The availability of network offering. returned: success type: string sample: Optional service_offering_id: description: The service offering ID. returned: success type: string sample: c5f7a5fc-43f8-11e5-a151-feff819cdc9f max_connections: description: The maximum number of concurrents connections to be handled by LB. returned: success type: int sample: 300 network_rate: description: The network traffic transfer ate in Mbit/s. returned: success type: int sample: 200 traffic_type: description: The traffic type. returned: success type: string sample: Guest egress_default_policy: description: Default egress policy. returned: success type: string sample: allow is_persistent: description: Whether persistent networks are supported or not. returned: success type: bool sample: false is_default: description: Whether network offering is the default offering or not. 
returned: success type: bool sample: false ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.cloudstack import ( AnsibleCloudStack, cs_argument_spec, cs_required_together, ) class AnsibleCloudStackNetworkOffering(AnsibleCloudStack): def __init__(self, module): super(AnsibleCloudStackNetworkOffering, self).__init__(module) self.returns = { 'guestiptype': 'guest_ip_type', 'availability': 'availability', 'serviceofferingid': 'service_offering_id', 'networkrate': 'network_rate', 'maxconnections': 'max_connections', 'traffictype': 'traffic_type', 'isdefault': 'is_default', 'ispersistent': 'is_persistent', } self.network_offering = None def get_service_offering_id(self): service_offering = self.module.params.get('service_offering') if not service_offering: return None args = { 'issystem': True } service_offerings = self.query_api('listServiceOfferings', **args) if service_offerings: for s in service_offerings['serviceoffering']: if service_offering in [s['name'], s['id']]: return s['id'] self.fail_json(msg="Service offering '%s' not found" % service_offering) def get_network_offering(self): if self.network_offering: return self.network_offering args = { 'name': self.module.params.get('name'), 'guestiptype': self.module.params.get('guest_type'), } no = self.query_api('listNetworkOfferings', **args) if no: self.network_offering = no['networkoffering'][0] return self.network_offering def create_or_update(self): network_offering = self.get_network_offering() if not network_offering: network_offering = self.create_network_offering() return self.update_network_offering(network_offering=network_offering) def create_network_offering(self): network_offering = None self.result['changed'] = True args = { 'state': self.module.params.get('state'), 'displaytext': self.module.params.get('display_text'), 'guestiptype': self.module.params.get('guest_ip_type'), 'name': self.module.params.get('name'), 'supportedservices': 
self.module.params.get('supported_services'), 'traffictype': self.module.params.get('traffic_type'), 'availability': self.module.params.get('availability'), 'conservemode': self.module.params.get('conserve_mode'), 'details': self.module.params.get('details'), 'egressdefaultpolicy': self.module.params.get('egress_default_policy') == 'allow', 'ispersistent': self.module.params.get('persistent'), 'keepaliveenabled': self.module.params.get('keepalive_enabled'), 'maxconnections': self.module.params.get('max_connections'), 'networkrate': self.module.params.get('network_rate'), 'servicecapabilitylist': self.module.params.get('service_capabilities'), 'serviceofferingid': self.get_service_offering_id(), 'serviceproviderlist': self.module.params.get('service_providers'), 'specifyipranges': self.module.params.get('specify_ip_ranges'), 'specifyvlan': self.module.params.get('specify_vlan'), } required_params = [ 'display_text', 'guest_ip_type', 'supported_services', 'service_providers', ] self.module.fail_on_missing_params(required_params=required_params) if not self.module.check_mode: res = self.query_api('createNetworkOffering', **args) network_offering = res['networkoffering'] return network_offering def delete_network_offering(self): network_offering = self.get_network_offering() if network_offering: self.result['changed'] = True if not self.module.check_mode: self.query_api('deleteNetworkOffering', id=network_offering['id']) return network_offering def update_network_offering(self, network_offering): if not network_offering: return network_offering args = { 'id': network_offering['id'], 'state': self.module.params.get('state'), 'displaytext': self.module.params.get('display_text'), 'name': self.module.params.get('name'), 'availability': self.module.params.get('availability'), 'maxconnections': self.module.params.get('max_connections'), } if args['state'] in ['enabled', 'disabled']: args['state'] = args['state'].title() else: del args['state'] if self.has_changed(args, 
network_offering): self.result['changed'] = True if not self.module.check_mode: res = self.query_api('updateNetworkOffering', **args) network_offering = res['networkoffering'] return network_offering def get_result(self, network_offering): super(AnsibleCloudStackNetworkOffering, self).get_result(network_offering) if network_offering: self.result['egress_default_policy'] = 'allow' if network_offering.get('egressdefaultpolicy') else 'deny' return self.result def main(): argument_spec = cs_argument_spec() argument_spec.update(dict( state=dict(choices=['enabled', 'present', 'disabled', 'absent'], default='present'), display_text=dict(), guest_ip_type=dict(choices=['Shared', 'Isolated']), name=dict(required=True), supported_services=dict(type='list', aliases=['supported_service']), traffic_type=dict(default='Guest'), availability=dict(), conserve_mode=dict(type='bool'), details=dict(type='list'), egress_default_policy=dict(choices=['allow', 'deny']), persistent=dict(type='bool'), keepalive_enabled=dict(type='bool'), max_connections=dict(type='int'), network_rate=dict(type='int'), service_capabilities=dict(type='list', aliases=['service_capability']), service_offering=dict(), service_providers=dict(type='list', aliases=['service_provider']), specify_ip_ranges=dict(type='bool'), specify_vlan=dict(type='bool'), )) module = AnsibleModule( argument_spec=argument_spec, required_together=cs_required_together(), supports_check_mode=True ) acs_network_offering = AnsibleCloudStackNetworkOffering(module) state = module.params.get('state') if state in ['absent']: network_offering = acs_network_offering.delete_network_offering() else: network_offering = acs_network_offering.create_or_update() result = acs_network_offering.get_result(network_offering) module.exit_json(**result) if __name__ == '__main__': main()
cdsteinkuehler/MachineKit
refs/heads/MachineKit-ubc
share/gscreen/skins/gaxis/gaxis_handler.py
7
import hal # This is a handler file for using Gscreen's infrastructure # to load a completely custom glade screen # The only things that really matters is that it's saved as a GTK builder project, # the toplevel window is caller window1 (The default name) and you connect a destroy # window signal else you can't close down linuxcnc class HandlerClass: # This will be pretty standard to gain access to everything # emc is for control and status of linuxcnc # data is important data from gscreen and linuxcnc # widgets is all the widgets from the glade files # gscreen is for access to gscreens methods def __init__(self, halcomp,builder,useropts,gscreen): self.emc = gscreen.emc self.data = gscreen.data self.widgets = gscreen.widgets self.gscreen = gscreen # erase the ready-to-home message on statusbar def on_hal_status_all_homed(self,widget): print "all-homed" self.data.all_homed = True self.widgets.statusbar1.remove_message(self.gscreen.statusbar_id,self.gscreen.homed_status_message) # This connects siganals without using glade's autoconnect method # in this case to destroy the window # it calls the method in gscreen: gscreen.on_window_destroy() # and run-at-line dialog def connect_signals(self,handlers): signal_list = [ ["window1","destroy", "on_window1_destroy"], ["restart_ok","clicked", "restart_dialog_return", True], ["restart_cancel","clicked", "restart_dialog_return", False], ["restart","clicked", "launch_restart_dialog"], ["restart_line_up","clicked", "restart_up"], ["restart_line_down","clicked", "restart_down"], ["restart_line_input","value_changed", "restart_set_line"], ["metric_select","clicked","on_metric_select_clicked"], ] for i in signal_list: if len(i) == 3: self.gscreen.widgets[i[0]].connect(i[1], self.gscreen[i[2]]) elif len(i) == 4: self.gscreen.widgets[i[0]].connect(i[1], self.gscreen[i[2]],i[3]) # We don't want Gscreen to initialize it's regular widgets because this custom # screen doesn't have most of them. So we add this function call. 
# Since this custom screen uses gladeVCP magic for its interaction with linuxcnc # We don't add much to this function, but we do want to be able to change the theme so: # We change the GTK theme to what's in gscreen's preference file. # gscreen.change_theme() is a method in gscreen that changes the GTK theme of window1 # gscreen.data.theme_name is the name of the theme from the preference file # To truely be friendly, we should add a way to change the theme directly in the custom screen. # we also set up the statusbar and add a ready-to-home message def initialize_widgets(self): self.gscreen.change_theme(self.data.theme_name) self.gscreen.statusbar_id = self.widgets.statusbar1.get_context_id("Statusbar1") self.gscreen.homed_status_message = self.widgets.statusbar1.push(1,"Ready For Homing") # If we need extra HAL pins here is where we do it. # Note you must import hal at the top of this script to do it. # For gaxis there is no extra pins but since we don't want gscreen to # add it's default pins we added this function def initialize_pins(self): pass # every 50 milli seconds this gets called # add pass so gscreen doesn't try to update it's regular widgets or # add the individual function names that you would like to call. # In this case we wish to call Gscreen's default function for units button update def periodic(self): self.gscreen.update_units_button_label() # standard handler call def get_handlers(halcomp,builder,useropts,gscreen): return [HandlerClass(halcomp,builder,useropts,gscreen)]
valentin-krasontovitsch/ansible
refs/heads/devel
lib/ansible/plugins/doc_fragments/dellos10.py
8
# -*- coding: utf-8 -*- # Copyright: (c) 2015, Peter Sprygada <psprygada@ansible.com> # Copyright: (c) 2016, Dell Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) class ModuleDocFragment(object): # Standard files documentation fragment DOCUMENTATION = """ options: provider: description: - A dict object containing connection details. suboptions: host: description: - Specifies the DNS host name or address for connecting to the remote device over the specified transport. The value of host is used as the destination address for the transport. required: true port: description: - Specifies the port to use when building the connection to the remote device. default: 22 username: description: - User to authenticate the SSH session to the remote device. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead. password: description: - Password to authenticate the SSH session to the remote device. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead. ssh_keyfile: description: - Path to an ssh key used to authenticate the SSH session to the remote device. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead. timeout: description: - Specifies idle timeout (in seconds) for the connection. Useful if the console freezes before continuing. For example when saving configurations. default: 10 notes: - For more information on using Ansible to manage Dell EMC Network devices see U(https://www.ansible.com/ansible-dell-networking). """
dosiecki/NewsBlur
refs/heads/master
vendor/feedvalidator/demo/src/rdflib/syntax/parsers/__init__.py
19
# Public parser plugins re-exported by this package.
__all__ = ["RDFXMLParser", "NTParser"]
harmslab/epistasis
refs/heads/master
epistasis/models/linear/lasso.py
2
import numpy as np
from sklearn.linear_model import Lasso
from ..base import BaseModel, use_sklearn
from ..utils import arghandler

# Suppress an annoying error from scikit-learn
import warnings
warnings.filterwarnings(action="ignore", module="scipy",
                        message="^internal gelsd")


@use_sklearn(Lasso)
class EpistasisLasso(BaseModel):
    """A scikit-learn Lasso Regression class for discovering sparse epistatic
    coefficients.

    Methods are described in the following publication:
        Poelwijk FJ, Socolich M, and Ranganathan R. 'Learning the pattern of
        epistasis linking enotype and phenotype in a protein'.
        bioRxiv. (2017).

    Parameters
    ----------
    order : int
        order of epistasis

    model_type : str (default="global")
        model matrix type. See publication above for more information

    alpha : float
        Constant that multiplies the L1 term. Defaults to 1.0.
        alpha = 0 is equivalent to an ordinary least square, solved by
        the EpistasisLinearRegression object.

    precompute :
        Whether to use a precomputed Gram matrix to speed up calculations.
        If set to 'auto' let us decide. The Gram matrix can also be passed
        as argument. For sparse input this option is always True to
        preserve sparsity.

    max_iter : int
        The maximum number of iterations.

    tol : float
        The tolerance for the optimization: if the updates are smaller
        than tol, the optimization code checks the dual gap for optimality
        and continues until it is smaller than tol.

    warm_start : bool
        When set to True, reuse the solution of the previous call to fit
        as initialization, otherwise, just erase the previous solution.

    positive : bool
        When set to True, forces the coefficients to be positive.

    random_state : int
        The seed of the pseudo random number generator that selects a
        random feature to update. If int, random_state is the seed used
        by the random number generator; If RandomState instance,
        random_state is the random number generator; If None, the random
        number generator is the RandomState instance used by np.random.
        Used when selection == 'random'.

    selection : str
        If set to 'random', a random coefficient is updated every
        iteration rather than looping over features sequentially by
        default. This (setting to 'random') often leads to significantly
        faster convergence especially when tol is higher than 1e-4.
    """

    def __init__(
            self,
            order=1,
            model_type="global",
            alpha=1.0,
            precompute=False,
            max_iter=1000,
            tol=0.0001,
            warm_start=False,
            positive=False,
            random_state=None,
            selection='cyclic',
            **kwargs):
        # Set Linear Regression settings (attributes read by the sklearn
        # Lasso machinery mixed in by @use_sklearn).
        self.fit_intercept = False
        self.normalize = False
        self.copy_X = True
        self.alpha = alpha
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.warm_start = warm_start
        self.positive = positive
        self.random_state = random_state
        self.selection = selection
        # l1_ratio fixed at 1.0 -> pure L1 (Lasso) penalty.
        self.l1_ratio = 1.0

        self.set_params(model_type=model_type, order=order)
        self.Xbuilt = {}

        # Store model specs.
        self.model_specs = dict(
            order=self.order,
            model_type=self.model_type,
            **kwargs)

    def compression_ratio(self):
        """Compute the compression ratio for the Lasso regression.

        Returns the fraction of epistatic coefficients driven exactly to
        zero by the L1 penalty.
        """
        vals = self.epistasis.values
        zeros = vals[vals == 0]

        numer = len(zeros)
        denom = len(vals)
        return numer / denom

    @property
    def num_of_params(self):
        # Number of parameters equals the number of epistatic coefficients.
        n = 0
        n += self.epistasis.n
        return n

    @arghandler
    def fit(self, X=None, y=None, **kwargs):
        """Fit the Lasso model and copy coefficients into self.epistasis."""
        # If a threshold exists in the data, pre-classify genotypes
        # Fortran (column-major) layout is the memory order sklearn's
        # coordinate-descent solver prefers.
        X = np.asfortranarray(X)
        # NOTE(review): super(self.__class__, self) is normally an
        # anti-pattern (infinite recursion under subclassing), but
        # @use_sklearn rewrites this class, so the dynamic lookup may be
        # deliberate here -- confirm against use_sklearn before changing.
        self = super(self.__class__, self).fit(X, y)
        # Link coefs to epistasis values.
        self.epistasis.values = np.reshape(self.coef_, (-1,))
        return self

    def fit_transform(self, X=None, y=None, **kwargs):
        """Alias for fit; no separate transform step for this model."""
        return self.fit(X=X, y=y, **kwargs)

    @arghandler
    def predict(self, X=None):
        """Predict phenotypes with the fitted sklearn Lasso model."""
        X = np.asfortranarray(X)
        return super(self.__class__, self).predict(X)

    @arghandler
    def predict_transform(self, X=None, y=None):
        """Alias for predict; y is accepted for interface symmetry only."""
        return self.predict(X=X)

    @arghandler
    def score(self, X=None, y=None):
        """Return the sklearn regression score (R^2) on X, y."""
        X = np.asfortranarray(X)
        return super(self.__class__, self).score(X, y)

    @property
    def thetas(self):
        # Model parameters as exposed by the fitted sklearn estimator.
        return self.coef_

    @arghandler
    def hypothesis(self, X=None, thetas=None):
        """Linear model prediction: X @ thetas."""
        return np.dot(X, thetas)

    @arghandler
    def hypothesis_transform(self, X=None, y=None, thetas=None):
        """Alias for hypothesis; y is accepted for interface symmetry only."""
        return self.hypothesis(X=X, thetas=thetas)

    @arghandler
    def lnlike_of_data(
            self,
            X=None,
            y=None,
            yerr=None,
            thetas=None):
        """Per-point Gaussian log-likelihood with an L1 (Laplace) prior term.

        The last term subtracts alpha * sum(|theta|), matching the Lasso
        penalty used in fitting.
        """
        # Calculate y from model.
        ymodel = self.hypothesis(X=X, thetas=thetas)

        # Return the likelihood of this model (with an L1 prior)
        return (- 0.5 * np.log(2 * np.pi * yerr**2) -
                (0.5 * ((y - ymodel)**2 / yerr**2)) -
                (self.alpha * sum(abs(thetas))))

    @arghandler
    def lnlike_transform(
            self,
            X=None,
            y=None,
            yerr=None,
            lnprior=None,
            thetas=None):
        """Combine the data log-likelihood with an externally supplied prior."""
        # Update likelihood.
        lnlike = self.lnlike_of_data(X=X, y=y, yerr=yerr, thetas=thetas)
        return lnlike + lnprior
idncom/odoo
refs/heads/8.0
addons/pos_discount/__init__.py
315
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import discount # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
xfournet/intellij-community
refs/heads/master
python/testData/inspections/PyArgumentListInspection/decoratedChangedParameters.py
83
# Inspection test fixture: a decorator that replaces the wrapped function's
# signature with a zero-argument callable, so the bare call below is valid.
def fill(f):
    return lambda: f('test')

@fill
def test(x):
    return x

# Valid zero-argument call: `test` is now the lambda returned by fill.
test()
peterfpeterson/mantid
refs/heads/master
Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/ReflectometryILLPolarizationCorTest.py
3
# -*- coding: utf-8 -*-
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright &copy; 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from mantid.api import MatrixWorkspace, WorkspaceGroup
from mantid.simpleapi import (ReflectometryILLPreprocess, ReflectometryILLSumForeground,
                              ReflectometryILLPolarizationCor, mtd)
import unittest


class ReflectometryILLPolarizationCorTest(unittest.TestCase):
    """Smoke test for the ReflectometryILLPolarizationCor workflow algorithm.

    setUpClass prepares the D17 direct- and reflected-beam workspaces once so
    the (expensive) preprocessing is shared by all test methods.
    """

    @classmethod
    def setUpClass(cls):
        ReflectometryILLPreprocess(Run='ILL/D17/317369.nxs',
                                   Measurement='DirectBeam',
                                   ForegroundHalfWidth=5,
                                   OutputWorkspace='db')
        ReflectometryILLPreprocess(Run='ILL/D17/317370.nxs',
                                   Measurement='ReflectedBeam',
                                   ForegroundHalfWidth=5,
                                   OutputWorkspace='rb')
        # first the direct beam
        ReflectometryILLSumForeground(InputWorkspace='db', OutputWorkspace='db_frg')
        # then the reflected beam
        ReflectometryILLSumForeground(InputWorkspace='rb',
                                      OutputWorkspace='rb_frg',
                                      SummationType='SumInLambda',
                                      DirectLineWorkspace='db',
                                      DirectForegroundWorkspace='db_frg')

    @classmethod
    def tearDownClass(cls):
        # Drop every workspace created by this test class from the ADS.
        mtd.clear()

    def testExecutes(self):
        """The algorithm runs and produces a group with one 991-bin spectrum."""
        ReflectometryILLPolarizationCor(
            InputWorkspaces='rb_frg',
            OutputWorkspace='pol_corrected',
            EfficiencyFile='ILL/D17/PolarizationFactors.txt'
        )
        self.checkOutput(mtd['pol_corrected'], 1, 991)

    def checkOutput(self, ws, items, blocksize):
        """Assert ws is a WorkspaceGroup of `items` single-histogram
        wavelength workspaces with `blocksize` bins each.

        Uses assertEqual: the assertEquals alias is deprecated and was
        removed in Python 3.12.
        """
        self.assertTrue(ws)
        self.assertTrue(isinstance(ws, WorkspaceGroup))
        self.assertEqual(ws.getNumberOfEntries(), items)
        item = ws[0]
        self.assertTrue(isinstance(item, MatrixWorkspace))
        self.assertTrue(item.isHistogramData())
        self.assertEqual(item.blocksize(), blocksize)
        self.assertEqual(item.getNumberHistograms(), 1)
        self.assertEqual(item.getAxis(0).getUnit().unitID(), 'Wavelength')


if __name__ == "__main__":
    unittest.main()
spaivaras/gr-osmosdr-fork-sdrplay
refs/heads/master
docs/doxygen/doxyxml/doxyindex.py
223
#
# Copyright 2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Classes providing more user-friendly interfaces to the doxygen xml
docs than the generated classes provide.

Each Doxy* class wraps one doxygen XML entity kind and registers itself
in Base.mem_classes so Base can dispatch on the XML `kind` attribute.
"""

import os

# Python 2 implicit-relative imports: sibling modules of this package.
from generated import index
from base import Base
from text import description


class DoxyIndex(Base):
    """
    Parses a doxygen xml directory.
    """

    __module__ = "gnuradio.utils.doxyxml"

    def _parse(self):
        # Parse index.xml once; convert each compound entry to its Doxy* wrapper.
        if self._parsed:
            return
        super(DoxyIndex, self)._parse()
        self._root = index.parse(os.path.join(self._xml_path, 'index.xml'))
        for mem in self._root.compound:
            converted = self.convert_mem(mem)
            # For files we want the contents to be accessible directly
            # from the parent rather than having to go through the file
            # object.
            if self.get_cls(mem) == DoxyFile:
                # Only header files get their members hoisted to the index.
                if mem.name.endswith('.h'):
                    self._members += converted.members()
                    self._members.append(converted)
            else:
                self._members.append(converted)

    def generate_swig_doc_i(self):
        """
        %feature("docstring") gr_make_align_on_samplenumbers_ss::align_state "
        Wraps the C++: gr_align_on_samplenumbers_ss::align_state";
        """
        # Not implemented; the docstring shows the intended SWIG output format.
        pass


class DoxyCompMem(Base):
    """Common base for compound (class/file/group) and member wrappers."""

    # Subclasses set `kind` to the doxygen XML kind they handle.
    kind = None

    def __init__(self, *args, **kwargs):
        super(DoxyCompMem, self).__init__(*args, **kwargs)

    @classmethod
    def can_parse(cls, obj):
        # Dispatch hook used by Base: match on the XML `kind` attribute.
        return obj.kind == cls.kind

    def set_descriptions(self, parse_data):
        # Store brief/detailed descriptions (missing attributes become None).
        bd = description(getattr(parse_data, 'briefdescription', None))
        dd = description(getattr(parse_data, 'detaileddescription', None))
        self._data['brief_description'] = bd
        self._data['detailed_description'] = dd


class DoxyCompound(DoxyCompMem):
    pass


class DoxyMember(DoxyCompMem):
    pass


class DoxyFunction(DoxyMember):
    """Wraps a doxygen `function` member, exposing its parameters."""

    __module__ = "gnuradio.utils.doxyxml"
    kind = 'function'

    def _parse(self):
        if self._parsed:
            return
        super(DoxyFunction, self)._parse()
        self.set_descriptions(self._parse_data)
        self._data['params'] = []
        prms = self._parse_data.param
        for prm in prms:
            self._data['params'].append(DoxyParam(prm))

    brief_description = property(lambda self: self.data()['brief_description'])
    detailed_description = property(lambda self: self.data()['detailed_description'])
    params = property(lambda self: self.data()['params'])

Base.mem_classes.append(DoxyFunction)


class DoxyParam(DoxyMember):
    """Wraps a single function parameter (declname + descriptions)."""

    __module__ = "gnuradio.utils.doxyxml"

    def _parse(self):
        if self._parsed:
            return
        super(DoxyParam, self)._parse()
        self.set_descriptions(self._parse_data)
        self._data['declname'] = self._parse_data.declname

    brief_description = property(lambda self: self.data()['brief_description'])
    detailed_description = property(lambda self: self.data()['detailed_description'])
    declname = property(lambda self: self.data()['declname'])


class DoxyClass(DoxyCompound):
    """Wraps a doxygen `class` compound; members come from its own XML file."""

    __module__ = "gnuradio.utils.doxyxml"
    kind = 'class'

    def _parse(self):
        if self._parsed:
            return
        super(DoxyClass, self)._parse()
        self.retrieve_data()
        if self._error:
            return
        self.set_descriptions(self._retrieved_data.compounddef)
        # Sectiondef.kind tells about whether private or public.
        # We just ignore this for now.
        self.process_memberdefs()

    brief_description = property(lambda self: self.data()['brief_description'])
    detailed_description = property(lambda self: self.data()['detailed_description'])

Base.mem_classes.append(DoxyClass)


class DoxyFile(DoxyCompound):
    """Wraps a doxygen `file` compound."""

    __module__ = "gnuradio.utils.doxyxml"
    kind = 'file'

    def _parse(self):
        if self._parsed:
            return
        super(DoxyFile, self)._parse()
        self.retrieve_data()
        # NOTE(review): unlike DoxyClass, descriptions are set BEFORE the
        # error check here, so _retrieved_data is touched even on error --
        # confirm whether this ordering is intentional.
        self.set_descriptions(self._retrieved_data.compounddef)
        if self._error:
            return
        self.process_memberdefs()

    brief_description = property(lambda self: self.data()['brief_description'])
    detailed_description = property(lambda self: self.data()['detailed_description'])

Base.mem_classes.append(DoxyFile)


class DoxyNamespace(DoxyCompound):

    __module__ = "gnuradio.utils.doxyxml"
    kind = 'namespace'

Base.mem_classes.append(DoxyNamespace)


class DoxyGroup(DoxyCompound):
    """Wraps a doxygen `group` compound: title, subgroups, classes, members."""

    __module__ = "gnuradio.utils.doxyxml"
    kind = 'group'

    def _parse(self):
        if self._parsed:
            return
        super(DoxyGroup, self)._parse()
        self.retrieve_data()
        if self._error:
            return
        cdef = self._retrieved_data.compounddef
        self._data['title'] = description(cdef.title)
        # Process inner groups
        grps = cdef.innergroup
        for grp in grps:
            converted = DoxyGroup.from_refid(grp.refid, top=self.top)
            self._members.append(converted)
        # Process inner classes
        klasses = cdef.innerclass
        for kls in klasses:
            converted = DoxyClass.from_refid(kls.refid, top=self.top)
            self._members.append(converted)
        # Process normal members
        self.process_memberdefs()

    title = property(lambda self: self.data()['title'])

Base.mem_classes.append(DoxyGroup)


class DoxyFriend(DoxyMember):

    __module__ = "gnuradio.utils.doxyxml"
    kind = 'friend'

Base.mem_classes.append(DoxyFriend)


class DoxyOther(Base):
    """Catch-all wrapper for kinds the library doesn't model individually."""

    __module__ = "gnuradio.utils.doxyxml"
    kinds = set(['variable', 'struct', 'union', 'define', 'typedef', 'enum',
                 'dir', 'page'])

    @classmethod
    def can_parse(cls, obj):
        return obj.kind in cls.kinds

Base.mem_classes.append(DoxyOther)
1st/django
refs/heads/master
django/contrib/gis/admin/widgets.py
449
import logging

from django.contrib.gis.gdal import GDALException
from django.contrib.gis.geos import GEOSException, GEOSGeometry
from django.forms.widgets import Textarea
from django.template import loader
from django.utils import six, translation

# Creating a template context that contains Django settings
# values needed by admin map templates.
geo_context = {'LANGUAGE_BIDI': translation.get_language_bidi()}
logger = logging.getLogger('django.contrib.gis')


class OpenLayersWidget(Textarea):
    """
    Renders an OpenLayers map using the WKT of the geometry.

    NOTE(review): self.params, self.geom_type and self.template are not set
    in this class -- presumably the admin's GeoModelAdmin configures them on
    the widget instance before render(); confirm against the caller.
    """

    def render(self, name, value, attrs=None):
        # Update the template parameters with any attributes passed in.
        if attrs:
            self.params.update(attrs)
            # 'modifiable' from attrs drives whether the map is editable.
            self.params['editable'] = self.params['modifiable']
        else:
            self.params['editable'] = True

        # Defaulting the WKT value to a blank string -- this
        # will be tested in the JavaScript and the appropriate
        # interface will be constructed.
        self.params['wkt'] = ''

        # If a string reaches here (via a validation error on another
        # field) then just reconstruct the Geometry.
        if isinstance(value, six.string_types):
            try:
                value = GEOSGeometry(value)
            except (GEOSException, ValueError) as err:
                logger.error(
                    "Error creating geometry from value '%s' (%s)" % (
                        value, err)
                )
                value = None

        # Discard geometries of the wrong type (unless the field accepts
        # any geometry).
        if (value and value.geom_type.upper() != self.geom_type and
                self.geom_type != 'GEOMETRY'):
            value = None

        # Constructing the dictionary of the map options.
        self.params['map_options'] = self.map_options()

        # Constructing the JavaScript module name using the name of
        # the GeometryField (passed in via the `attrs` keyword).
        # Use the 'name' attr for the field name (rather than 'field')
        self.params['name'] = name
        # note: we must switch out dashes for underscores since js
        # functions are created using the module variable
        js_safe_name = self.params['name'].replace('-', '_')
        self.params['module'] = 'geodjango_%s' % js_safe_name

        if value:
            # Transforming the geometry to the projection used on the
            # OpenLayers map.
            srid = self.params['srid']
            if value.srid != srid:
                try:
                    ogr = value.ogr
                    ogr.transform(srid)
                    wkt = ogr.wkt
                except GDALException as err:
                    logger.error(
                        "Error transforming geometry from srid '%s' to srid '%s' (%s)" % (
                            value.srid, srid, err)
                    )
                    wkt = ''
            else:
                wkt = value.wkt

            # Setting the parameter WKT with that of the transformed
            # geometry.
            self.params['wkt'] = wkt

        self.params.update(geo_context)
        return loader.render_to_string(self.template, self.params)

    def map_options(self):
        "Builds the map options hash for the OpenLayers template."

        # JavaScript construction utilities for the Bounds and Projection.
        def ol_bounds(extent):
            return 'new OpenLayers.Bounds(%s)' % str(extent)

        def ol_projection(srid):
            return 'new OpenLayers.Projection("EPSG:%s")' % srid

        # An array of the parameter name, the name of their OpenLayers
        # counterpart, and the type of variable they are.
        map_types = [('srid', 'projection', 'srid'),
                     ('display_srid', 'displayProjection', 'srid'),
                     ('units', 'units', str),
                     ('max_resolution', 'maxResolution', float),
                     ('max_extent', 'maxExtent', 'bounds'),
                     ('num_zoom', 'numZoomLevels', int),
                     ('max_zoom', 'maxZoomLevels', int),
                     ('min_zoom', 'minZoomLevel', int),
                     ]

        # Building the map options hash. Each present parameter is rendered
        # as the JavaScript snippet its option type requires.
        map_options = {}
        for param_name, js_name, option_type in map_types:
            if self.params.get(param_name, False):
                if option_type == 'srid':
                    value = ol_projection(self.params[param_name])
                elif option_type == 'bounds':
                    value = ol_bounds(self.params[param_name])
                elif option_type in (float, int):
                    value = self.params[param_name]
                elif option_type in (str,):
                    value = '"%s"' % self.params[param_name]
                else:
                    raise TypeError
                map_options[js_name] = value
        return map_options
dracos/django
refs/heads/master
django/contrib/admin/migrations/0001_initial.py
95
import django.contrib.admin.models
from django.conf import settings
from django.db import migrations, models


class Migration(migrations.Migration):
    """Initial schema for django.contrib.admin: the LogEntry model.

    Historical migration -- applied databases depend on it; do not edit
    the operations.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('contenttypes', '__first__'),
    ]

    operations = [
        migrations.CreateModel(
            name='LogEntry',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('action_time', models.DateTimeField(auto_now=True, verbose_name='action time')),
                ('object_id', models.TextField(null=True, verbose_name='object id', blank=True)),
                ('object_repr', models.CharField(max_length=200, verbose_name='object repr')),
                ('action_flag', models.PositiveSmallIntegerField(verbose_name='action flag')),
                ('change_message', models.TextField(verbose_name='change message', blank=True)),
                # content_type is nullable and SET_NULL so log entries
                # survive deletion of the referenced ContentType.
                ('content_type', models.ForeignKey(
                    to_field='id',
                    on_delete=models.SET_NULL,
                    blank=True, null=True,
                    to='contenttypes.ContentType',
                    verbose_name='content type',
                )),
                ('user', models.ForeignKey(
                    to=settings.AUTH_USER_MODEL,
                    on_delete=models.CASCADE,
                    verbose_name='user',
                )),
            ],
            options={
                'ordering': ('-action_time',),
                'db_table': 'django_admin_log',
                'verbose_name': 'log entry',
                'verbose_name_plural': 'log entries',
            },
            bases=(models.Model,),
            managers=[
                ('objects', django.contrib.admin.models.LogEntryManager()),
            ],
        ),
    ]
zachhuff386/tunldb
refs/heads/master
tunldb.py
1
import Queue import time import collections import threading import thread import uuid import copy import itertools import json import os TRANSACTION_METHODS = { 'set', 'increment', 'decrement', 'remove', 'rename', 'expire', 'set_add', 'set_remove', 'set_pop', 'list_lpush', 'list_rpush', 'list_lpop', 'list_rpop', 'list_remove', 'dict_set', 'dict_remove', } CHANNEL_TTL = 120 CHANNEL_BUFFER = 128 class TunlDB(object): def __init__(self): self._path = None self._set_queue = Queue.Queue() self._data = collections.defaultdict( lambda: {'ttl': None, 'val': None}) self._timers = {} self._channels = collections.defaultdict( lambda: {'subs': set(), 'msgs': collections.deque( maxlen=CHANNEL_BUFFER), 'timer': None}) self._commit_log = [] def _put_queue(self): if self._path: self._set_queue.put('set') def _export_thread(self): while True: try: self._set_queue.get(timeout=5) except Queue.Empty: continue # Attempt to get more db sets form queue to reduce export calls for _ in xrange(50): try: self._set_queue.get(timeout=0.01) except Queue.Empty: pass self.export_data() def _validate(self, value): if value is not None and not isinstance(value, basestring): raise TypeError('Value must be string') def persist(self, path, auto_export=True): if self._path: raise ValueError('Persist is already set') self._path = path self.import_data() if auto_export: export_thread = threading.Thread(target=self._export_thread) export_thread.daemon = True export_thread.start() def set(self, key, value): self._validate(value) self._data[key]['val'] = value self._put_queue() def get(self, key): data = self._data.get(key) if data: return data['val'] def exists(self, key): return key in self._data def rename(self, key, new_key): data = self._data.get(key) if data: self._data[new_key]['val'] = data['val'] self.remove(key) self._put_queue() def remove(self, key): self._data.pop(key, None) self._put_queue() def expire(self, key, ttl): ttl_time = int(time.time() * 1000) + int(ttl * 1000) cur_timer = 
self._timers.pop(key, None) if cur_timer: cur_timer.cancel() timer = threading.Timer(ttl, self.remove, (key,)) timer.daemon = True self._timers[key] = timer timer.start() self._data[key]['ttl'] = ttl_time self._put_queue() def increment(self, key): value = '1' data = self._data.get(key) if data: try: value = str(int(data['val']) + 1) data['val'] = value except (TypeError, ValueError): data['val'] = value else: self._data[key]['val'] = value self._put_queue() return value def decrement(self, key): value = '-1' data = self._data.get(key) if data: try: value = str(int(data['val']) - 1) data['val'] = value except (TypeError, ValueError): data['val'] = value else: self._data[key]['val'] = value self._put_queue() return value def keys(self): return set(self._data) def set_add(self, key, element): self._validate(element) data = self._data.get(key) if data: try: data['val'].add(element) except AttributeError: data['val'] = {element} else: self._data[key]['val'] = {element} self._put_queue() def set_remove(self, key, element): data = self._data.get(key) if data: try: data['val'].remove(element) self._put_queue() except (KeyError, AttributeError): pass def set_pop(self, key): value = None data = self._data.get(key) if data: try: value = data['val'].pop() self._put_queue() except (KeyError, AttributeError): pass return value def set_exists(self, key, element): data = self._data.get(key) if data: try: return element in data['val'] except (TypeError, AttributeError): pass return False def set_elements(self, key): data = self._data.get(key) if data: try: return data['val'].copy() except AttributeError: pass return set() def set_iter(self, key): data = self._data.get(key) if data: try: for value in data['val'].copy(): yield value except AttributeError: pass def set_length(self, key): data = self._data.get(key) if data: try: return len(data['val']) except TypeError: pass return 0 def list_lpush(self, key, value): self._validate(value) data = self._data.get(key) if data: try: 
data['val'].appendleft(value) except AttributeError: data['val'] = collections.deque([value]) else: self._data[key]['val'] = collections.deque([value]) self._put_queue() def list_rpush(self, key, value): self._validate(value) data = self._data.get(key) if data: try: data['val'].append(value) except AttributeError: data['val'] = collections.deque([value]) else: self._data[key]['val'] = collections.deque([value]) self._put_queue() def list_lpop(self, key): value = None data = self._data.get(key) if data: try: value = data['val'].popleft() self._put_queue() except (AttributeError, IndexError): pass return value def list_rpop(self, key): value = None data = self._data.get(key) if data: try: value = data['val'].pop() self._put_queue() except (AttributeError, IndexError): pass return value def list_index(self, key, index): data = self._data.get(key) if data: try: return data['val'][index] except (AttributeError, IndexError): pass def list_elements(self, key): data = self._data.get(key) if data: try: return list(data['val']) except TypeError: pass return [] def list_iter(self, key): data = self._data.get(key) if data: try: for value in copy.copy(data['val']): yield value except TypeError: pass def list_iter_range(self, key, start, stop=None): data = self._data.get(key) if data: try: for value in itertools.islice( copy.copy(data['val']), start, stop): yield value except TypeError: pass def list_remove(self, key, value, count=1): self._validate(value) data = self._data.get(key) if data: if count: try: [data['val'].remove(value) for _ in xrange(count)] except (AttributeError, ValueError): pass else: try: while True: data['val'].remove(value) except (AttributeError, ValueError): pass self._put_queue() def list_length(self, key): data = self._data.get(key) if data: try: return len(data['val']) except TypeError: pass return 0 def dict_set(self, key, field, value): self._validate(value) data = self._data.get(key) if data: try: data['val'][field] = value except TypeError: 
data['val'] = {field: value} else: self._data[key]['val'] = {field: value} self._put_queue() def dict_get(self, key, field): data = self._data.get(key) if data: try: return data['val'].get(field) except TypeError: pass def dict_remove(self, key, field): data = self._data.get(key) if data: try: data['val'].pop(field, None) except AttributeError: pass self._put_queue() def dict_keys(self, key): data = self._data.get(key) if data: try: return set(data['val']) except AttributeError: pass return set() def dict_values(self, key): data = self._data.get(key) if data: try: return set(data['val'].values()) except AttributeError: pass return set() def dict_iter(self, key): data = self._data.get(key) if data: data_copy = data['val'].copy() try: for field in data_copy: yield field, data_copy[field] except (TypeError, AttributeError): pass def dict_get_all(self, key): data = self._data.get(key) if data: try: return data['val'].copy() except AttributeError: pass return {} def _clear_channel(self, channel): if not self._channels[channel]['subs']: self._channels.pop(channel, None) else: self._channels[channel]['timer'] = None self._channels[channel]['msgs'] = collections.deque( maxlen=CHANNEL_BUFFER) def subscribe(self, channel, timeout=None): event = threading.Event() self._channels[channel]['subs'].add(event) try: try: cursor = self._channels[channel]['msgs'][-1][0] except IndexError: cursor = None while True: if not cursor: cursor_found = True else: cursor_found = False if not event.wait(timeout): break event.clear() messages = copy.copy(self._channels[channel]['msgs']) for message in messages: if cursor_found: yield message[1] elif message[0] == cursor: cursor_found = True if not cursor_found: for message in messages: yield message[1] try: cursor = messages[-1][0] except IndexError: cursor = None finally: try: self._channels[channel]['subs'].remove(event) except KeyError: pass def publish(self, channel, message): cur_timer = self._channels[channel]['timer'] if cur_timer: 
cur_timer.cancel() timer = threading.Timer(CHANNEL_TTL, self._clear_channel, (channel,)) timer.daemon = True self._channels[channel]['timer'] = timer timer.start() self._channels[channel]['msgs'].append((uuid.uuid4().hex, message)) for subscriber in self._channels[channel]['subs'].copy(): subscriber.set() def transaction(self): return TunlDBTransaction(self) def _apply_trans(self, trans): for call in trans[1]: getattr(self, call[0])(*call[1], **call[2]) try: self._commit_log.remove(trans) except ValueError: pass self._put_queue() def export_data(self): if not self._path: return temp_path = self._path + '_%s.tmp' % uuid.uuid4().hex try: data = self._data.copy() timers = self._timers.keys() commit_log = copy.copy(self._commit_log) with open(temp_path, 'w') as db_file: os.chmod(temp_path, 0600) export_data = [] for key in data: key_ttl = data[key]['ttl'] key_val = data[key]['val'] key_type = type(key_val).__name__ if key_type == 'set' or key_type == 'deque': key_val = list(key_val) export_data.append((key, key_type, key_ttl, key_val)) db_file.write(json.dumps({ 'ver': 1, 'data': export_data, 'timers': timers, 'commit_log': commit_log, })) os.rename(temp_path, self._path) except: try: os.remove(temp_path) except OSError: pass raise def import_data(self): if os.path.isfile(self._path): with open(self._path, 'r') as db_file: import_data = json.loads(db_file.read()) data = import_data['data'] for key_data in data: key = key_data[0] key_type = key_data[1] key_ttl = key_data[2] key_val = key_data[3] if key_type == 'set': key_val = set(key_val) elif key_type == 'deque': key_val = collections.deque(key_val) self._data[key] = { 'ttl': key_ttl, 'val': key_val, } if 'timers' in import_data: for key in import_data['timers']: if key not in self._data: continue ttl = self._data[key]['ttl'] if not ttl: continue ttl -= int(time.time() * 1000) ttl /= 1000.0 if ttl >= 0: timer = threading.Timer(ttl, self.remove, (key,)) timer.daemon = True self._timers[key] = timer timer.start() else: 
self.remove(key) if 'commit_log' in import_data: for tran in import_data['commit_log']: self._apply_trans(tran) class TunlDBTransaction(object): def __init__(self, tunldb): self._tunldb = tunldb self._trans = [] def __getattr__(self, name): if name in TRANSACTION_METHODS: def serialize(*args, **kwargs): self._trans.append((name, args, kwargs)) return serialize return getattr(self._tunldb, name) def commit(self): trans = (uuid.uuid4().hex, self._trans) self._tunldb._commit_log.append(trans) self._trans = [] self._tunldb._apply_trans(trans)
shaobozi/googletest
refs/heads/master
test/gtest_output_test.py
496
#!/usr/bin/env python # # Copyright 2008, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Tests the text output of Google C++ Testing Framework. SYNOPSIS gtest_output_test.py --build_dir=BUILD/DIR --gengolden # where BUILD/DIR contains the built gtest_output_test_ file. 
gtest_output_test.py --gengolden gtest_output_test.py """ __author__ = 'wan@google.com (Zhanyong Wan)' import os import re import sys import gtest_test_utils # The flag for generating the golden file GENGOLDEN_FLAG = '--gengolden' CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS' IS_WINDOWS = os.name == 'nt' # TODO(vladl@google.com): remove the _lin suffix. GOLDEN_NAME = 'gtest_output_test_golden_lin.txt' PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_') # At least one command we exercise must not have the # --gtest_internal_skip_environment_and_ad_hoc_tests flag. COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests']) COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes']) COMMAND_WITH_TIME = ({}, [PROGRAM_PATH, '--gtest_print_time', '--gtest_internal_skip_environment_and_ad_hoc_tests', '--gtest_filter=FatalFailureTest.*:LoggingTest.*']) COMMAND_WITH_DISABLED = ( {}, [PROGRAM_PATH, '--gtest_also_run_disabled_tests', '--gtest_internal_skip_environment_and_ad_hoc_tests', '--gtest_filter=*DISABLED_*']) COMMAND_WITH_SHARDING = ( {'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'}, [PROGRAM_PATH, '--gtest_internal_skip_environment_and_ad_hoc_tests', '--gtest_filter=PassingTest.*']) GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME) def ToUnixLineEnding(s): """Changes all Windows/Mac line endings in s to UNIX line endings.""" return s.replace('\r\n', '\n').replace('\r', '\n') def RemoveLocations(test_output): """Removes all file location info from a Google Test program's output. Args: test_output: the output of a Google Test program. Returns: output with all file location info (in the form of 'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or 'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by 'FILE_NAME:#: '. """ return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\: ', r'\1:#: ', test_output) def RemoveStackTraceDetails(output): """Removes all stack traces from a Google Test program's output.""" # *? 
means "find the shortest string that matches". return re.sub(r'Stack trace:(.|\n)*?\n\n', 'Stack trace: (omitted)\n\n', output) def RemoveStackTraces(output): """Removes all traces of stack traces from a Google Test program's output.""" # *? means "find the shortest string that matches". return re.sub(r'Stack trace:(.|\n)*?\n\n', '', output) def RemoveTime(output): """Removes all time information from a Google Test program's output.""" return re.sub(r'\(\d+ ms', '(? ms', output) def RemoveTypeInfoDetails(test_output): """Removes compiler-specific type info from Google Test program's output. Args: test_output: the output of a Google Test program. Returns: output with type information normalized to canonical form. """ # some compilers output the name of type 'unsigned int' as 'unsigned' return re.sub(r'unsigned int', 'unsigned', test_output) def NormalizeToCurrentPlatform(test_output): """Normalizes platform specific output details for easier comparison.""" if IS_WINDOWS: # Removes the color information that is not present on Windows. test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output) # Changes failure message headers into the Windows format. test_output = re.sub(r': Failure\n', r': error: ', test_output) # Changes file(line_number) to file:line_number. test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output) return test_output def RemoveTestCounts(output): """Removes test counts from a Google Test program's output.""" output = re.sub(r'\d+ tests?, listed below', '? tests, listed below', output) output = re.sub(r'\d+ FAILED TESTS', '? FAILED TESTS', output) output = re.sub(r'\d+ tests? from \d+ test cases?', '? tests from ? test cases', output) output = re.sub(r'\d+ tests? from ([a-zA-Z_])', r'? tests from \1', output) return re.sub(r'\d+ tests?\.', '? tests.', output) def RemoveMatchingTests(test_output, pattern): """Removes output of specified tests from a Google Test program's output. 
This function strips not only the beginning and the end of a test but also all output in between. Args: test_output: A string containing the test output. pattern: A regex string that matches names of test cases or tests to remove. Returns: Contents of test_output with tests whose names match pattern removed. """ test_output = re.sub( r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n' % ( pattern, pattern), '', test_output) return re.sub(r'.*%s.*\n' % pattern, '', test_output) def NormalizeOutput(output): """Normalizes output (the output of gtest_output_test_.exe).""" output = ToUnixLineEnding(output) output = RemoveLocations(output) output = RemoveStackTraceDetails(output) output = RemoveTime(output) return output def GetShellCommandOutput(env_cmd): """Runs a command in a sub-process, and returns its output in a string. Args: env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra environment variables to set, and element 1 is a string with the command and any flags. Returns: A string with the command's combined standard and diagnostic output. """ # Spawns cmd in a sub-process, and gets its standard I/O file objects. # Set and save the environment properly. environ = os.environ.copy() environ.update(env_cmd[0]) p = gtest_test_utils.Subprocess(env_cmd[1], env=environ) return p.output def GetCommandOutput(env_cmd): """Runs a command and returns its output with all file location info stripped off. Args: env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra environment variables to set, and element 1 is a string with the command and any flags. """ # Disables exception pop-ups on Windows. environ, cmdline = env_cmd environ = dict(environ) # Ensures we are modifying a copy. 
environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1' return NormalizeOutput(GetShellCommandOutput((environ, cmdline))) def GetOutputOfAllCommands(): """Returns concatenated output from several representative commands.""" return (GetCommandOutput(COMMAND_WITH_COLOR) + GetCommandOutput(COMMAND_WITH_TIME) + GetCommandOutput(COMMAND_WITH_DISABLED) + GetCommandOutput(COMMAND_WITH_SHARDING)) test_list = GetShellCommandOutput(COMMAND_LIST_TESTS) SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list SUPPORTS_STACK_TRACES = False CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and SUPPORTS_TYPED_TESTS and SUPPORTS_THREADS and not IS_WINDOWS) class GTestOutputTest(gtest_test_utils.TestCase): def RemoveUnsupportedTests(self, test_output): if not SUPPORTS_DEATH_TESTS: test_output = RemoveMatchingTests(test_output, 'DeathTest') if not SUPPORTS_TYPED_TESTS: test_output = RemoveMatchingTests(test_output, 'TypedTest') test_output = RemoveMatchingTests(test_output, 'TypedDeathTest') test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest') if not SUPPORTS_THREADS: test_output = RemoveMatchingTests(test_output, 'ExpectFailureWithThreadsTest') test_output = RemoveMatchingTests(test_output, 'ScopedFakeTestPartResultReporterTest') test_output = RemoveMatchingTests(test_output, 'WorksConcurrently') if not SUPPORTS_STACK_TRACES: test_output = RemoveStackTraces(test_output) return test_output def testOutput(self): output = GetOutputOfAllCommands() golden_file = open(GOLDEN_PATH, 'rb') # A mis-configured source control system can cause \r appear in EOL # sequences when we read the golden file irrespective of an operating # system used. Therefore, we need to strip those \r's from newlines # unconditionally. golden = ToUnixLineEnding(golden_file.read()) golden_file.close() # We want the test to pass regardless of certain features being # supported or not. 
# We still have to remove type name specifics in all cases. normalized_actual = RemoveTypeInfoDetails(output) normalized_golden = RemoveTypeInfoDetails(golden) if CAN_GENERATE_GOLDEN_FILE: self.assertEqual(normalized_golden, normalized_actual) else: normalized_actual = NormalizeToCurrentPlatform( RemoveTestCounts(normalized_actual)) normalized_golden = NormalizeToCurrentPlatform( RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden))) # This code is very handy when debugging golden file differences: if os.getenv('DEBUG_GTEST_OUTPUT_TEST'): open(os.path.join( gtest_test_utils.GetSourceDir(), '_gtest_output_test_normalized_actual.txt'), 'wb').write( normalized_actual) open(os.path.join( gtest_test_utils.GetSourceDir(), '_gtest_output_test_normalized_golden.txt'), 'wb').write( normalized_golden) self.assertEqual(normalized_golden, normalized_actual) if __name__ == '__main__': if sys.argv[1:] == [GENGOLDEN_FLAG]: if CAN_GENERATE_GOLDEN_FILE: output = GetOutputOfAllCommands() golden_file = open(GOLDEN_PATH, 'wb') golden_file.write(output) golden_file.close() else: message = ( """Unable to write a golden file when compiled in an environment that does not support all the required features (death tests, typed tests, and multiple threads). Please generate the golden file using a binary built with those features enabled.""") sys.stderr.write(message) sys.exit(1) else: gtest_test_utils.Main()
youdonghai/intellij-community
refs/heads/master
python/helpers/py2only/docutils/languages/en.py
246
# $Id: en.py 4564 2006-05-21 20:44:42Z wiemann $ # Author: David Goodger <goodger@python.org> # Copyright: This module has been placed in the public domain. # New language mappings are welcome. Before doing a new translation, please # read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be # translated for each language: one in docutils/languages, the other in # docutils/parsers/rst/languages. """ English-language mappings for language-dependent features of Docutils. """ __docformat__ = 'reStructuredText' labels = { # fixed: language-dependent 'author': 'Author', 'authors': 'Authors', 'organization': 'Organization', 'address': 'Address', 'contact': 'Contact', 'version': 'Version', 'revision': 'Revision', 'status': 'Status', 'date': 'Date', 'copyright': 'Copyright', 'dedication': 'Dedication', 'abstract': 'Abstract', 'attention': 'Attention!', 'caution': 'Caution!', 'danger': '!DANGER!', 'error': 'Error', 'hint': 'Hint', 'important': 'Important', 'note': 'Note', 'tip': 'Tip', 'warning': 'Warning', 'contents': 'Contents'} """Mapping of node class name to label text.""" bibliographic_fields = { # language-dependent: fixed 'author': 'author', 'authors': 'authors', 'organization': 'organization', 'address': 'address', 'contact': 'contact', 'version': 'version', 'revision': 'revision', 'status': 'status', 'date': 'date', 'copyright': 'copyright', 'dedication': 'dedication', 'abstract': 'abstract'} """English (lowcased) to canonical name mapping for bibliographic fields.""" author_separators = [';', ','] """List of separator strings for the 'Authors' bibliographic field. Tried in order."""
huntxu/neutron
refs/heads/master
neutron/manager.py
2
# Copyright 2011 VMware, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import defaultdict from neutron_lib.plugins import constants as lib_const from neutron_lib.plugins import directory from neutron_lib.utils import runtime from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_service import periodic_task from oslo_utils import excutils from osprofiler import profiler import six from neutron._i18n import _ from neutron.common import utils from neutron.plugins.common import constants LOG = logging.getLogger(__name__) CORE_PLUGINS_NAMESPACE = 'neutron.core_plugins' class ManagerMeta(profiler.TracedMeta, type(periodic_task.PeriodicTasks)): pass @six.add_metaclass(ManagerMeta) class Manager(periodic_task.PeriodicTasks): __trace_args__ = {"name": "rpc"} # Set RPC API version to 1.0 by default. target = oslo_messaging.Target(version='1.0') def __init__(self, host=None): if not host: host = cfg.CONF.host self.host = host conf = getattr(self, "conf", cfg.CONF) super(Manager, self).__init__(conf) def periodic_tasks(self, context, raise_on_error=False): self.run_periodic_tasks(context, raise_on_error=raise_on_error) def init_host(self): """Handle initialization if this is a standalone service. Child classes should override this method. """ pass def after_start(self): """Handler post initialization stuff. Child classes can override this method. 
""" pass def validate_post_plugin_load(): """Checks if the configuration variables are valid. If the configuration is invalid then the method will return an error message. If all is OK then it will return None. """ if ('dhcp_agents_per_network' in cfg.CONF and cfg.CONF.dhcp_agents_per_network <= 0): msg = _("dhcp_agents_per_network must be >= 1. '%s' " "is invalid.") % cfg.CONF.dhcp_agents_per_network return msg def validate_pre_plugin_load(): """Checks if the configuration variables are valid. If the configuration is invalid then the method will return an error message. If all is OK then it will return None. """ if cfg.CONF.core_plugin is None: msg = _('Neutron core_plugin not configured!') return msg @six.add_metaclass(profiler.TracedMeta) class NeutronManager(object): """Neutron's Manager class. Neutron's Manager class is responsible for parsing a config file and instantiating the correct plugin that concretely implements neutron_plugin_base class. """ # TODO(armax): use of the singleton pattern for this class is vestigial, # and it is mainly relied on by the unit tests. It is safer to get rid # of it once the entire codebase (neutron + subprojects) has switched # entirely to using the plugins directory. _instance = None __trace_args__ = {"name": "rpc"} def __init__(self, options=None, config_file=None): # If no options have been provided, create an empty dict if not options: options = {} msg = validate_pre_plugin_load() if msg: LOG.critical(msg) raise Exception(msg) # NOTE(jkoelker) Testing for the subclass with the __subclasshook__ # breaks tach monitoring. It has been removed # intentionally to allow v2 plugins to be monitored # for performance metrics. 
plugin_provider = cfg.CONF.core_plugin LOG.info("Loading core plugin: %s", plugin_provider) # NOTE(armax): keep hold of the actual plugin object plugin = self._get_plugin_instance(CORE_PLUGINS_NAMESPACE, plugin_provider) directory.add_plugin(lib_const.CORE, plugin) msg = validate_post_plugin_load() if msg: LOG.critical(msg) raise Exception(msg) # load services from the core plugin first self._load_services_from_core_plugin(plugin) self._load_service_plugins() # Used by pecan WSGI self.resource_plugin_mappings = {} self.resource_controller_mappings = {} self.path_prefix_resource_mappings = defaultdict(list) @staticmethod def load_class_for_provider(namespace, plugin_provider): """Loads plugin using alias or class name :param namespace: namespace where alias is defined :param plugin_provider: plugin alias or class name :returns: plugin that is loaded :raises ImportError: if fails to load plugin """ try: return runtime.load_class_by_alias_or_classname(namespace, plugin_provider) except ImportError: with excutils.save_and_reraise_exception(): LOG.error("Plugin '%s' not found.", plugin_provider) def _get_plugin_instance(self, namespace, plugin_provider): plugin_class = self.load_class_for_provider(namespace, plugin_provider) return plugin_class() def _load_services_from_core_plugin(self, plugin): """Puts core plugin in service_plugins for supported services.""" LOG.debug("Loading services supported by the core plugin") # supported service types are derived from supported extensions for ext_alias in getattr(plugin, "supported_extension_aliases", []): if ext_alias in constants.EXT_TO_SERVICE_MAPPING: service_type = constants.EXT_TO_SERVICE_MAPPING[ext_alias] directory.add_plugin(service_type, plugin) LOG.info("Service %s is supported by the core plugin", service_type) def _get_default_service_plugins(self): """Get default service plugins to be loaded.""" core_plugin = directory.get_plugin() if core_plugin.has_native_datastore(): return 
constants.DEFAULT_SERVICE_PLUGINS.keys() else: return [] def _load_service_plugins(self): """Loads service plugins. Starts from the core plugin and checks if it supports advanced services then loads classes provided in configuration. """ plugin_providers = cfg.CONF.service_plugins plugin_providers.extend(self._get_default_service_plugins()) LOG.debug("Loading service plugins: %s", plugin_providers) for provider in plugin_providers: if provider == '': continue LOG.info("Loading Plugin: %s", provider) plugin_inst = self._get_plugin_instance('neutron.service_plugins', provider) # only one implementation of svc_type allowed # specifying more than one plugin # for the same type is a fatal exception # TODO(armax): simplify this by moving the conditional into the # directory itself. plugin_type = plugin_inst.get_plugin_type() if directory.get_plugin(plugin_type): raise ValueError(_("Multiple plugins for service " "%s were configured") % plugin_type) directory.add_plugin(plugin_type, plugin_inst) # search for possible agent notifiers declared in service plugin # (needed by agent management extension) plugin = directory.get_plugin() if (hasattr(plugin, 'agent_notifiers') and hasattr(plugin_inst, 'agent_notifiers')): plugin.agent_notifiers.update(plugin_inst.agent_notifiers) # disable incompatible extensions in core plugin if any utils.disable_extension_by_service_plugin(plugin, plugin_inst) LOG.debug("Successfully loaded %(type)s plugin. 
" "Description: %(desc)s", {"type": plugin_type, "desc": plugin_inst.get_plugin_description()}) @classmethod @runtime.synchronized("manager") def _create_instance(cls): if not cls.has_instance(): cls._instance = cls() @classmethod def has_instance(cls): return cls._instance is not None @classmethod def clear_instance(cls): cls._instance = None @classmethod def get_instance(cls): # double checked locking if not cls.has_instance(): cls._create_instance() return cls._instance @classmethod def set_plugin_for_resource(cls, resource, plugin): cls.get_instance().resource_plugin_mappings[resource] = plugin @classmethod def get_plugin_for_resource(cls, resource): return cls.get_instance().resource_plugin_mappings.get(resource) @classmethod def set_controller_for_resource(cls, resource, controller): cls.get_instance().resource_controller_mappings[resource] = controller @classmethod def get_controller_for_resource(cls, resource): resource = resource.replace('_', '-') res_ctrl_mappings = cls.get_instance().resource_controller_mappings # If no controller is found for resource, try replacing dashes with # underscores return res_ctrl_mappings.get( resource, res_ctrl_mappings.get(resource.replace('-', '_'))) # TODO(blogan): This isn't used by anything else other than tests and # probably should be removed @classmethod def get_service_plugin_by_path_prefix(cls, path_prefix): service_plugins = directory.get_unique_plugins() for service_plugin in service_plugins: plugin_path_prefix = getattr(service_plugin, 'path_prefix', None) if plugin_path_prefix and plugin_path_prefix == path_prefix: return service_plugin @classmethod def add_resource_for_path_prefix(cls, resource, path_prefix): resources = cls.get_instance().path_prefix_resource_mappings[ path_prefix].append(resource) return resources @classmethod def get_resources_for_path_prefix(cls, path_prefix): return cls.get_instance().path_prefix_resource_mappings[path_prefix] def init(): """Call to load the plugins (core+services) 
machinery.""" if not directory.is_loaded(): NeutronManager.get_instance()
KhronosGroup/COLLADA-CTS
refs/heads/master
StandardDataSets/collada/library_visual_scenes/visual_scene/node/transform_stack/node_trs_stack/node_trs_stack.py
4
# Copyright (c) 2012 The Khronos Group Inc. # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to # the following conditions: # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Materials. # THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. # See Core.Logic.FJudgementContext for the information # of the 'context' parameter. # This sample judging object does the following: # # JudgeBaseline: just verifies that the standard steps did not crash. # JudgeSuperior: also verifies that the validation steps are not in error. # JudgeExemplary: same as intermediate badge. # We import an assistant script that includes the common verifications # methods. The assistant buffers its checks, so that running them again # does not incurs an unnecessary performance hint. 
from StandardDataSets.scripts import JudgeAssistant # Please feed your node list here: tagLst = [] attrName = '' attrVal = '' dataToCheck = '' nodeIdLst = ['cube_node'] class SimpleJudgingObject: def __init__(self, _tagLst, _attrName, _attrVal, _data, _nodeIdLst): self.tagList = _tagLst self.attrName = _attrName self.attrVal = _attrVal self.dataToCheck = _data self.nodeIdLst = _nodeIdLst self.status_baseline = False self.status_superior = False self.status_exemplary = False self.__assistant = JudgeAssistant.JudgeAssistant() def JudgeBaseline(self, context): # No step should crash self.__assistant.CheckCrashes(context) # Import/export/validate must exist and pass, while Render must only exist. self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], ["Render"]) if (self.__assistant.GetResults() == False): self.status_baseline = False return False # Compare the import and export rendered images self.__assistant.CompareRenderedImages(context) self.status_baseline = self.__assistant.GetResults() return self.status_baseline # To pass superior you need to pass baseline, this object could also include additional # tests that were specific to the superior badge. def JudgeSuperior(self, context): self.status_superior = self.status_baseline return self.status_superior # To pass exemplary you need to pass superior, this object could also include additional # tests that were specific to the exemplary badge def JudgeExemplary(self, context): # if superior fails, no point in further checking if (self.status_superior == False): self.status_exemplary = self.status_superior return self.status_exemplary # Checks for complete preservation of the transform stack self.__assistant.TransformStackPreserved(context, self.nodeIdLst) self.status_exemplary = self.__assistant.DeferJudgement(context) return self.status_exemplary # This is where all the work occurs: "judgingObject" is an absolutely necessary token. 
# The dynamic loader looks very specifically for a class instance named "judgingObject". # judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck, nodeIdLst);
drnextgis/QGIS
refs/heads/master
tests/src/python/test_versioncompare.py
74
# -*- coding: utf-8 -*- ''' test_versioncompare.py -------------------------------------- Date : September 2016 Copyright : (C) 2016 Alexander Bruy email : alexander dot bruy at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ ''' import qgis # NOQA from qgis.testing import unittest, start_app from pyplugin_installer.version_compare import compareVersions start_app() class TestVersionCompare(unittest.TestCase): def setUp(self): """Run before each test.""" pass def tearDown(self): """Run after each test.""" pass def testCompareVersions(self): a = '1.0.0' # a == b b = '1.0.0' self.assertEqual(compareVersions(a, b), 0) # a > b b = '0.1.0' self.assertEqual(compareVersions(a, b), 1) # b > a b = '1.1.0' self.assertEqual(compareVersions(a, b), 2) # test that prefix stripped correctly a = 'ver. 1.0.0' b = 'ver. 0.1.0' self.assertEqual(compareVersions(a, b), 1) # test versions with build numbers a = '1.0.0-1' b = '1.0.0-2' self.assertEqual(compareVersions(a, b), 2) # test versions with suffixes a = '1.0.0a' b = '1.0.0b' self.assertEqual(compareVersions(a, b), 2) # test versions with suffixes in different cases a = '1.0.0-201609011405-2690BD9' b = '1.0.0-201609011405-2690bd9' self.assertEqual(compareVersions(a, b), 0) if __name__ == '__main__': unittest.main()
zahanm/foodpedia
refs/heads/master
djangoappengine/tests/not_return_sets.py
107
from .testmodels import FieldsWithOptionsModel, OrderedModel, SelfReferenceModel import datetime from django.test import TestCase from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned class NonReturnSetsTest(TestCase): floats = [5.3, 2.6, 9.1, 1.58, 2.4] emails = ['app-engine@scholardocs.com', 'sharingan@uchias.com', 'rinnengan@sage.de', 'rasengan@naruto.com', 'itachi@uchia.com'] def setUp(self): for index, (float, email) in enumerate(zip(NonReturnSetsTest.floats, NonReturnSetsTest.emails)): self.last_save_time = datetime.datetime.now().time() ordered_instance = OrderedModel(priority=index, pk=index + 1) ordered_instance.save() model = FieldsWithOptionsModel(floating_point=float, integer=int(float), email=email, time=self.last_save_time, foreign_key=ordered_instance) model.save() def test_get(self): self.assertEquals(FieldsWithOptionsModel.objects.get( email='itachi@uchia.com') .email, 'itachi@uchia.com') # test exception when matching multiple entities self.assertRaises(MultipleObjectsReturned, FieldsWithOptionsModel.objects .get, integer=2) # test exception when entity does not exist self.assertRaises(ObjectDoesNotExist, FieldsWithOptionsModel.objects .get, floating_point=5.2) # TODO: test create when djangos model.save_base is refactored # TODO: test get_or_create when refactored def test_count(self): self.assertEquals(FieldsWithOptionsModel.objects.filter( integer=2).count(), 2) def test_in_bulk(self): self.assertEquals([key in ['sharingan@uchias.com', 'itachi@uchia.com'] for key in FieldsWithOptionsModel.objects.in_bulk( ['sharingan@uchias.com', 'itachi@uchia.com']).keys()], [True, ]*2) def test_latest(self): self.assertEquals('itachi@uchia.com', FieldsWithOptionsModel.objects .latest('time').email) def test_exists(self): self.assertEquals(True, FieldsWithOptionsModel.objects.exists()) def test_deletion(self): # TODO: ForeignKeys will not be deleted! 
This has to be done via # background tasks self.assertEquals(FieldsWithOptionsModel.objects.count(), 5) FieldsWithOptionsModel.objects.get(email='itachi@uchia.com').delete() self.assertEquals(FieldsWithOptionsModel.objects.count(), 4) FieldsWithOptionsModel.objects.filter(email__in=['sharingan@uchias.com', 'itachi@uchia.com', 'rasengan@naruto.com', ]).delete() self.assertEquals(FieldsWithOptionsModel.objects.count(), 2) def test_selfref_deletion(self): entity = SelfReferenceModel() entity.save() entity.delete() def test_foreign_key_fetch(self): # test fetching the ForeignKey ordered_instance = OrderedModel.objects.get(priority=2) self.assertEquals(FieldsWithOptionsModel.objects.get(integer=9).foreign_key, ordered_instance) def test_foreign_key_backward(self): entity = OrderedModel.objects.all()[0] self.assertEquals(entity.keys.count(), 1) # TODO: add should save the added instance transactional via for example # force_insert new_foreign_key = FieldsWithOptionsModel(floating_point=5.6, integer=3, email='temp@temp.com', time=datetime.datetime.now()) entity.keys.add(new_foreign_key) self.assertEquals(entity.keys.count(), 2) # TODO: add test for create entity.keys.remove(new_foreign_key) self.assertEquals(entity.keys.count(), 1) entity.keys.clear() self.assertTrue(not entity.keys.exists()) entity.keys = [new_foreign_key, new_foreign_key] self.assertEquals(entity.keys.count(), 1) self.assertEquals(entity.keys.all()[0].integer, 3)
legco-watch/legco-watch
refs/heads/master
app/common/models.py
2
from django.db import models class ErrorReportManager(models.Manager): def open_errors(self): return self.filter(resolved=False) class ErrorReport(models.Model): """ Reports of inaccuracies in the data """ # Timestamp of the when the report was created reported = models.DateTimeField() # The url of the page that the error is on url = models.TextField() # User comment comment = models.TextField(blank=True) resolved = models.BooleanField(default=False) objects = ErrorReportManager() class Meta: ordering = ['-reported'] def __unicode__(self): return self.reported.strftime('%Y-%m-%d %H:%M:%S')
qskycolor/viewfinder
refs/heads/master
backend/op/op_manager.py
13
# Copyright 2012 Viewfinder Inc. All Rights Reserved. """Viewfinder operation manager. The operation manager tracks and executes operations submitted by user devices. The key goals of the operation manager are: - Provide restart functionality for incomplete operations - Serialize operations coming from a single device - Mutual exclusion between multiple processing servers (only one server may operate on a user's operations at a time). Restart functionality is achieved by writing each operation as JSON-encoded data to the Operation table. Operations are given a unique id that is allocated by client devices, which should be the order that the client would like them run by the server. Mutual exclusion is assured by acquiring a per-user lock for operations submitted by a particular user. The operation lock provides a server with exclusive access to operations for a user. With the lock, the server processes each pending operation in order for a device (operations from multiple devices may be interleaved). Another server receiving an operation for a locked user will simply write the op to the database and continue. If a server with a lock crashes, then operations for that user will stall for a maximum of the lock expiration time. Each server periodically scans the lock table to pick up and resuscitate idle user operation queues which were dropped or ignored (e.g. due to excessive load). In cases where an operation hits transient problems (such as database unavailability) or bugs, the operation will be retried by the manager. After a number of such retries, the operation manager will eventually give up and put that operation into "quarantine", which means it will be saved in the database for later developer inspection and repair. The quarantine state is useful because without it, a failed operation would retain the operation lock and prevent all future operations for that user from executing. This would result in total user lockout. 
OpManager: one instance per server; processes user ops which have fallen through the cracks """ __authors__ = ['spencer@emailscrubbed.com (Spencer Kimball)', 'andy@emailscrubbed.com (Andy Kimball)'] import logging import random import time from datetime import timedelta from functools import partial from tornado import gen, stack_context from tornado.ioloop import IOLoop from viewfinder.backend.base import message, util from viewfinder.backend.db import db_client from viewfinder.backend.db.lock import Lock from viewfinder.backend.db.lock_resource_type import LockResourceType class OpManager(object): """Submit new operations to the op manager via the "MaybeExecuteOp" method. The OpManager class manages the set of all users that have submitted operations to this server. However, the queue of operations is actually managed and executed by an instance of the UserOpManager class. Periodically scans the database for abandoned locks and failed operations. Each abandoned lock is associated with user operations that have stalled and need to be restarted. Each failed operation needs to be periodically retried in order to see if the underlying issue has been fixed. On startup, a random time offset is chosen before initiating the first scan. This is meant to avoid multiple servers scanning the same data. This class is meant to be a singleton for each instance of the server. Access the instance via OpManager.Instance(). """ _MAX_USERS_OUTSTANDING = 1000 """Maximum number of users that can be under management for scans to take place.""" _SCAN_LIMIT = 10 """Maximum number of abandoned locks and failed operations that will be returned from scans (i.e. after filtering). 
""" _MAX_SCAN_ABANDONED_LOCKS_INTERVAL = timedelta(seconds=60) """Time between scans for abandoned locks.""" _MAX_SCAN_FAILED_OPS_INTERVAL = timedelta(hours=6) """Time between scans for failed operations to retry.""" def __init__(self, op_map, client=None, scan_ops=False): """Initializes the operation map, which is a dictionary mapping from operation method str to an instance of OpMapEntry. Also initializes maps for active users (map from user id to an instance of UserOpManager). """ self.op_map = op_map self._client = client or db_client.Instance() self._active_users = dict() self._drain_callback = None if scan_ops: self._ScanAbandonedLocks() self._ScanFailedOps() def WaitForUserOps(self, client, user_id, callback): """Wait for all ops running on behalf of user_id to complete. WaitForOp behaves exactly like using the "synchronous" option when submitting an operation. The callback will be invoked once all operations are completed or they're backed off due to repeated failure. """ self.MaybeExecuteOp(client, user_id, None, callback) def Drain(self, callback): """Invokes "callback" when there is no current work to be done. To be used for cleanup in tests. """ if not self._active_users: IOLoop.current().add_callback(callback) else: self._drain_callback = stack_context.wrap(callback) def MaybeExecuteOp(self, client, user_id, operation_id, wait_callback=None): """Adds the op's user to the queue and attempts to begin processing the operation. If the user is already locked by another server, or if this server is already executing operations for this user, then the operation is merely queued for later execution. If the "wait_callback" function is specified, then it is called once the operation has completed execution (or an error has occurred). This is useful for testing. 
The callback should have the form: OnExecution(value=None, type=None, tb=None) """ from viewfinder.backend.op.user_op_manager import UserOpManager user_op_mgr = self._active_users.get(user_id, None) if user_op_mgr is None: user_op_mgr = UserOpManager(client, self.op_map, user_id, partial(self._OnCompletedOp, user_id)) self._active_users[user_id] = user_op_mgr user_op_mgr.Execute(operation_id, wait_callback) def _OnCompletedOp(self, user_id): """Removes the user from the list of active users, since all of that user's operations have been executed. """ del self._active_users[user_id] if not self._active_users and self._drain_callback: IOLoop.current().add_callback(self._drain_callback) self._drain_callback = None @gen.engine def _ScanFailedOps(self): """Periodically scans the Operation table for operations which have failed and are ready to retry. If any are found, they are retried to see if the error that originally caused them to fail has been fixed. """ from viewfinder.backend.db.operation import Operation max_timeout_secs = OpManager._MAX_SCAN_FAILED_OPS_INTERVAL.total_seconds() while True: # If there are too many active users, do not scan. if len(self._active_users) < self._MAX_USERS_OUTSTANDING: try: last_key = None while True: limit = min(self._MAX_USERS_OUTSTANDING - len(self._active_users), OpManager._SCAN_LIMIT) ops, last_key = yield gen.Task(Operation.ScanFailed, self._client, limit=limit, excl_start_key=last_key) # Add each operation to the queue for the owning user. for op in ops: logging.info('scanned failed operation "%s" for user %d' % (op.operation_id, op.user_id)) if op.user_id not in self._active_users: # Create a clean context for this operation since we're not blocking the current # coroutine on it. 
with stack_context.NullContext(): with util.ExceptionBarrier(util.LogExceptionCallback): self.MaybeExecuteOp(self._client, op.user_id, op.operation_id) # Keep iterating until all failed operations have been found, otherwise wait until the next scan time. if last_key is None: break except Exception: logging.exception('failed op scan failed') # Wait until next scan time. timeout_secs = random.random() * max_timeout_secs timeout_time = time.time() + timeout_secs logging.debug('next scan in %.2fs' % timeout_secs) yield gen.Task(IOLoop.current().add_timeout, timeout_time) @gen.engine def _ScanAbandonedLocks(self): """Periodically scans the Locks table looking for abandoned operation locks. If any are found, the associated operations are executed. TODO(Andy): Scanning for abandoned locks really should go into a LockManager class. See header for lock.py. """ max_timeout_secs = OpManager._MAX_SCAN_ABANDONED_LOCKS_INTERVAL.total_seconds() while True: # If there are too many active users, do not scan. if len(self._active_users) < self._MAX_USERS_OUTSTANDING: try: last_key = None while True: limit = min(self._MAX_USERS_OUTSTANDING - len(self._active_users), OpManager._SCAN_LIMIT) locks, last_key = yield gen.Task(Lock.ScanAbandoned, self._client, limit=limit, excl_start_key=last_key) for lock in locks: resource_type, resource_id = Lock.DeconstructLockId(lock.lock_id) if resource_type == LockResourceType.Operation: user_id = int(resource_id) logging.info('scanned operation lock for user %d' % user_id) # Create a clean context for this operation since we're not blocking the current # coroutine on it. with stack_context.NullContext(): with util.ExceptionBarrier(util.LogExceptionCallback): self.MaybeExecuteOp(self._client, user_id, lock.resource_data) # Keep iterating until all abandoned locks have been found, otherwise wait until the next scan time. if last_key is None: break except Exception: logging.exception('abandoned lock scan failed') # Wait until next scan time. 
timeout_secs = random.random() * max_timeout_secs timeout_time = time.time() + timeout_secs logging.debug('next scan in %.2fs' % timeout_secs) yield gen.Task(IOLoop.current().add_timeout, timeout_time) @staticmethod def SetInstance(op_manager): """Sets the per-process instance of the OpManager class.""" OpManager._instance = op_manager @staticmethod def Instance(): """Gets the per-process instance of the OpManager class.""" assert hasattr(OpManager, '_instance'), 'instance not initialized' return OpManager._instance class OpMapEntry(object): """The OpManager constructor is supplied with the "operation map", which is a dictionary mapping from operation method str to an instance of this class. Each operation method is associated with the following information: handler: Method to invoke in order to execute the operation. migrators: Message version migrators for the method args. scrubber: Scrubs personal info from operation args before logging. """ def __init__(self, handler, migrators=[], scrubber=None): self.handler = handler self.migrators = sorted(message.REQUIRED_MIGRATORS + migrators) self.scrubber = scrubber
cloakedcode/CouchPotatoServer
refs/heads/master
libs/sqlalchemy/connectors/zxJDBC.py
17
# connectors/zxJDBC.py # Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php import sys from sqlalchemy.connectors import Connector class ZxJDBCConnector(Connector): driver = 'zxjdbc' supports_sane_rowcount = False supports_sane_multi_rowcount = False supports_unicode_binds = True supports_unicode_statements = sys.version > '2.5.0+' description_encoding = None default_paramstyle = 'qmark' jdbc_db_name = None jdbc_driver_name = None @classmethod def dbapi(cls): from com.ziclix.python.sql import zxJDBC return zxJDBC def _driver_kwargs(self): """Return kw arg dict to be sent to connect().""" return {} def _create_jdbc_url(self, url): """Create a JDBC url from a :class:`~sqlalchemy.engine.url.URL`""" return 'jdbc:%s://%s%s/%s' % (self.jdbc_db_name, url.host, url.port is not None and ':%s' % url.port or '', url.database) def create_connect_args(self, url): opts = self._driver_kwargs() opts.update(url.query) return [ [self._create_jdbc_url(url), url.username, url.password, self.jdbc_driver_name], opts] def is_disconnect(self, e, connection, cursor): if not isinstance(e, self.dbapi.ProgrammingError): return False e = str(e) return 'connection is closed' in e or 'cursor is closed' in e def _get_server_version_info(self, connection): # use connection.connection.dbversion, and parse appropriately # to get a tuple raise NotImplementedError()
webmasterraj/FogOrNot
refs/heads/master
flask/lib/python2.7/site-packages/numpy/core/tests/test_unicode.py
92
from __future__ import division, absolute_import, print_function import sys import numpy as np from numpy.compat import asbytes, unicode, sixu from numpy.testing import TestCase, run_module_suite, assert_equal # Guess the UCS length for this python interpreter if sys.version_info[:2] >= (3, 3): # Python 3.3 uses a flexible string representation ucs4 = False def buffer_length(arr): if isinstance(arr, unicode): arr = str(arr) return (sys.getsizeof(arr+"a") - sys.getsizeof(arr)) * len(arr) v = memoryview(arr) if v.shape is None: return len(v) * v.itemsize else: return np.prod(v.shape) * v.itemsize elif sys.version_info[0] >= 3: import array as _array ucs4 = (_array.array('u').itemsize == 4) def buffer_length(arr): if isinstance(arr, unicode): return _array.array('u').itemsize * len(arr) v = memoryview(arr) if v.shape is None: return len(v) * v.itemsize else: return np.prod(v.shape) * v.itemsize else: if len(buffer(sixu('u'))) == 4: ucs4 = True else: ucs4 = False def buffer_length(arr): if isinstance(arr, np.ndarray): return len(arr.data) return len(buffer(arr)) # In both cases below we need to make sure that the byte swapped value (as # UCS4) is still a valid unicode: # Value that can be represented in UCS2 interpreters ucs2_value = sixu('\u0900') # Value that cannot be represented in UCS2 interpreters (but can in UCS4) ucs4_value = sixu('\U00100900') ############################################################ # Creation tests ############################################################ class create_zeros(object): """Check the creation of zero-valued arrays""" def content_check(self, ua, ua_scalar, nbytes): # Check the length of the unicode base type self.assertTrue(int(ua.dtype.str[2:]) == self.ulen) # Check the length of the data buffer self.assertTrue(buffer_length(ua) == nbytes) # Small check that data in array element is ok self.assertTrue(ua_scalar == sixu('')) # Encode to ascii and double check self.assertTrue(ua_scalar.encode('ascii') == asbytes('')) # Check 
buffer lengths for scalars if ucs4: self.assertTrue(buffer_length(ua_scalar) == 0) else: self.assertTrue(buffer_length(ua_scalar) == 0) def test_zeros0D(self): # Check creation of 0-dimensional objects ua = np.zeros((), dtype='U%s' % self.ulen) self.content_check(ua, ua[()], 4*self.ulen) def test_zerosSD(self): # Check creation of single-dimensional objects ua = np.zeros((2,), dtype='U%s' % self.ulen) self.content_check(ua, ua[0], 4*self.ulen*2) self.content_check(ua, ua[1], 4*self.ulen*2) def test_zerosMD(self): # Check creation of multi-dimensional objects ua = np.zeros((2, 3, 4), dtype='U%s' % self.ulen) self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4) self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4) class test_create_zeros_1(create_zeros, TestCase): """Check the creation of zero-valued arrays (size 1)""" ulen = 1 class test_create_zeros_2(create_zeros, TestCase): """Check the creation of zero-valued arrays (size 2)""" ulen = 2 class test_create_zeros_1009(create_zeros, TestCase): """Check the creation of zero-valued arrays (size 1009)""" ulen = 1009 class create_values(object): """Check the creation of unicode arrays with values""" def content_check(self, ua, ua_scalar, nbytes): # Check the length of the unicode base type self.assertTrue(int(ua.dtype.str[2:]) == self.ulen) # Check the length of the data buffer self.assertTrue(buffer_length(ua) == nbytes) # Small check that data in array element is ok self.assertTrue(ua_scalar == self.ucs_value*self.ulen) # Encode to UTF-8 and double check self.assertTrue(ua_scalar.encode('utf-8') == (self.ucs_value*self.ulen).encode('utf-8')) # Check buffer lengths for scalars if ucs4: self.assertTrue(buffer_length(ua_scalar) == 4*self.ulen) else: if self.ucs_value == ucs4_value: # In UCS2, the \U0010FFFF will be represented using a # surrogate *pair* self.assertTrue(buffer_length(ua_scalar) == 2*2*self.ulen) else: # In UCS2, the \uFFFF will be represented using a # regular 2-byte word 
self.assertTrue(buffer_length(ua_scalar) == 2*self.ulen) def test_values0D(self): # Check creation of 0-dimensional objects with values ua = np.array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen) self.content_check(ua, ua[()], 4*self.ulen) def test_valuesSD(self): # Check creation of single-dimensional objects with values ua = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen) self.content_check(ua, ua[0], 4*self.ulen*2) self.content_check(ua, ua[1], 4*self.ulen*2) def test_valuesMD(self): # Check creation of multi-dimensional objects with values ua = np.array([[[self.ucs_value*self.ulen]*2]*3]*4, dtype='U%s' % self.ulen) self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4) self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4) class test_create_values_1_ucs2(create_values, TestCase): """Check the creation of valued arrays (size 1, UCS2 values)""" ulen = 1 ucs_value = ucs2_value class test_create_values_1_ucs4(create_values, TestCase): """Check the creation of valued arrays (size 1, UCS4 values)""" ulen = 1 ucs_value = ucs4_value class test_create_values_2_ucs2(create_values, TestCase): """Check the creation of valued arrays (size 2, UCS2 values)""" ulen = 2 ucs_value = ucs2_value class test_create_values_2_ucs4(create_values, TestCase): """Check the creation of valued arrays (size 2, UCS4 values)""" ulen = 2 ucs_value = ucs4_value class test_create_values_1009_ucs2(create_values, TestCase): """Check the creation of valued arrays (size 1009, UCS2 values)""" ulen = 1009 ucs_value = ucs2_value class test_create_values_1009_ucs4(create_values, TestCase): """Check the creation of valued arrays (size 1009, UCS4 values)""" ulen = 1009 ucs_value = ucs4_value ############################################################ # Assignment tests ############################################################ class assign_values(object): """Check the assignment of unicode arrays with values""" def content_check(self, ua, ua_scalar, nbytes): # Check the length of 
the unicode base type self.assertTrue(int(ua.dtype.str[2:]) == self.ulen) # Check the length of the data buffer self.assertTrue(buffer_length(ua) == nbytes) # Small check that data in array element is ok self.assertTrue(ua_scalar == self.ucs_value*self.ulen) # Encode to UTF-8 and double check self.assertTrue(ua_scalar.encode('utf-8') == (self.ucs_value*self.ulen).encode('utf-8')) # Check buffer lengths for scalars if ucs4: self.assertTrue(buffer_length(ua_scalar) == 4*self.ulen) else: if self.ucs_value == ucs4_value: # In UCS2, the \U0010FFFF will be represented using a # surrogate *pair* self.assertTrue(buffer_length(ua_scalar) == 2*2*self.ulen) else: # In UCS2, the \uFFFF will be represented using a # regular 2-byte word self.assertTrue(buffer_length(ua_scalar) == 2*self.ulen) def test_values0D(self): # Check assignment of 0-dimensional objects with values ua = np.zeros((), dtype='U%s' % self.ulen) ua[()] = self.ucs_value*self.ulen self.content_check(ua, ua[()], 4*self.ulen) def test_valuesSD(self): # Check assignment of single-dimensional objects with values ua = np.zeros((2,), dtype='U%s' % self.ulen) ua[0] = self.ucs_value*self.ulen self.content_check(ua, ua[0], 4*self.ulen*2) ua[1] = self.ucs_value*self.ulen self.content_check(ua, ua[1], 4*self.ulen*2) def test_valuesMD(self): # Check assignment of multi-dimensional objects with values ua = np.zeros((2, 3, 4), dtype='U%s' % self.ulen) ua[0, 0, 0] = self.ucs_value*self.ulen self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4) ua[-1, -1, -1] = self.ucs_value*self.ulen self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4) class test_assign_values_1_ucs2(assign_values, TestCase): """Check the assignment of valued arrays (size 1, UCS2 values)""" ulen = 1 ucs_value = ucs2_value class test_assign_values_1_ucs4(assign_values, TestCase): """Check the assignment of valued arrays (size 1, UCS4 values)""" ulen = 1 ucs_value = ucs4_value class test_assign_values_2_ucs2(assign_values, TestCase): """Check the 
assignment of valued arrays (size 2, UCS2 values)""" ulen = 2 ucs_value = ucs2_value class test_assign_values_2_ucs4(assign_values, TestCase): """Check the assignment of valued arrays (size 2, UCS4 values)""" ulen = 2 ucs_value = ucs4_value class test_assign_values_1009_ucs2(assign_values, TestCase): """Check the assignment of valued arrays (size 1009, UCS2 values)""" ulen = 1009 ucs_value = ucs2_value class test_assign_values_1009_ucs4(assign_values, TestCase): """Check the assignment of valued arrays (size 1009, UCS4 values)""" ulen = 1009 ucs_value = ucs4_value ############################################################ # Byteorder tests ############################################################ class byteorder_values: """Check the byteorder of unicode arrays in round-trip conversions""" def test_values0D(self): # Check byteorder of 0-dimensional objects ua = np.array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen) ua2 = ua.newbyteorder() # This changes the interpretation of the data region (but not the # actual data), therefore the returned scalars are not # the same (they are byte-swapped versions of each other). 
self.assertTrue(ua[()] != ua2[()]) ua3 = ua2.newbyteorder() # Arrays must be equal after the round-trip assert_equal(ua, ua3) def test_valuesSD(self): # Check byteorder of single-dimensional objects ua = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen) ua2 = ua.newbyteorder() self.assertTrue(ua[0] != ua2[0]) self.assertTrue(ua[-1] != ua2[-1]) ua3 = ua2.newbyteorder() # Arrays must be equal after the round-trip assert_equal(ua, ua3) def test_valuesMD(self): # Check byteorder of multi-dimensional objects ua = np.array([[[self.ucs_value*self.ulen]*2]*3]*4, dtype='U%s' % self.ulen) ua2 = ua.newbyteorder() self.assertTrue(ua[0, 0, 0] != ua2[0, 0, 0]) self.assertTrue(ua[-1, -1, -1] != ua2[-1, -1, -1]) ua3 = ua2.newbyteorder() # Arrays must be equal after the round-trip assert_equal(ua, ua3) class test_byteorder_1_ucs2(byteorder_values, TestCase): """Check the byteorder in unicode (size 1, UCS2 values)""" ulen = 1 ucs_value = ucs2_value class test_byteorder_1_ucs4(byteorder_values, TestCase): """Check the byteorder in unicode (size 1, UCS4 values)""" ulen = 1 ucs_value = ucs4_value class test_byteorder_2_ucs2(byteorder_values, TestCase): """Check the byteorder in unicode (size 2, UCS2 values)""" ulen = 2 ucs_value = ucs2_value class test_byteorder_2_ucs4(byteorder_values, TestCase): """Check the byteorder in unicode (size 2, UCS4 values)""" ulen = 2 ucs_value = ucs4_value class test_byteorder_1009_ucs2(byteorder_values, TestCase): """Check the byteorder in unicode (size 1009, UCS2 values)""" ulen = 1009 ucs_value = ucs2_value class test_byteorder_1009_ucs4(byteorder_values, TestCase): """Check the byteorder in unicode (size 1009, UCS4 values)""" ulen = 1009 ucs_value = ucs4_value if __name__ == "__main__": run_module_suite()
anaviltripathi/pgmpy
refs/heads/dev
pgmpy/models/ClusterGraph.py
1
#!/usr/bin/env python3
from collections import defaultdict

import numpy as np

from pgmpy.base import UndirectedGraph
from pgmpy.factors import factor_product
from pgmpy.extern.six.moves import filter, range, zip


class ClusterGraph(UndirectedGraph):
    r"""
    Base class for representing Cluster Graph.

    Cluster graph is an undirected graph which is associated with a subset of
    variables. The graph contains undirected edges that connect clusters whose
    scopes have a non-empty intersection.

    Formally, a cluster graph :math:`\mathcal{U}` for a set of factors
    :math:`\Phi` over :math:`\mathcal{X}` is an undirected graph, each of
    whose nodes :math:`i` is associated with a subset
    :math:`C_i \subseteq X`. A cluster graph must be family-preserving - each
    factor :math:`\phi \in \Phi` must be associated with a cluster C, denoted
    :math:`\alpha(\phi)`, such that :math:`Scope[\phi] \subseteq C_i`. Each
    edge between a pair of clusters :math:`C_i` and :math:`C_j` is associated
    with a sepset :math:`S_{i,j} \subseteq C_i \cap C_j`.

    Parameters
    ----------
    data: input graph
        Data to initialize graph. If data=None (default) an empty graph is
        created. The data is an edge list.

    Examples
    --------
    Create an empty ClusterGraph with no nodes and no edges

    >>> from pgmpy.models import ClusterGraph
    >>> G = ClusterGraph()

    G can be grown by adding clique nodes.

    **Nodes:**

    Add a tuple (or list or set) of nodes as single clique node.

    >>> G.add_node(('a', 'b', 'c'))
    >>> G.add_nodes_from([('a', 'b'), ('a', 'b', 'c')])

    **Edges:**

    G can also be grown by adding edges.

    >>> G.add_edge(('a', 'b', 'c'), ('a', 'b'))

    or a list of edges

    >>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
    ...                   (('a', 'b', 'c'), ('a', 'c'))])
    """

    def __init__(self, ebunch=None):
        super(ClusterGraph, self).__init__()
        if ebunch:
            self.add_edges_from(ebunch)
        # Factors associated with the cliques of this graph; populated
        # through add_factors().
        self.factors = []

    def add_node(self, node, **kwargs):
        """
        Add a single node to the cluster graph.

        Parameters
        ----------
        node: node
            A node should be a collection of nodes forming a clique. It can be
            a list, set or tuple of nodes.

        Examples
        --------
        >>> from pgmpy.models import ClusterGraph
        >>> G = ClusterGraph()
        >>> G.add_node(('a', 'b', 'c'))
        """
        if not isinstance(node, (list, set, tuple)):
            raise TypeError('Node can only be a list, set or tuple of nodes forming a clique')

        # Cliques are stored in canonical tuple form so they are hashable.
        node = tuple(node)
        super(ClusterGraph, self).add_node(node, **kwargs)

    def add_nodes_from(self, nodes, **kwargs):
        """
        Add multiple nodes to the cluster graph.

        Parameters
        ----------
        nodes: iterable container
            A container of nodes (list, dict, set, etc.).

        Examples
        --------
        >>> from pgmpy.models import ClusterGraph
        >>> G = ClusterGraph()
        >>> G.add_nodes_from([('a', 'b'), ('a', 'b', 'c')])
        """
        for node in nodes:
            self.add_node(node, **kwargs)

    def add_edge(self, u, v, **kwargs):
        """
        Add an edge between two clique nodes.

        Parameters
        ----------
        u, v: nodes
            Nodes can be any list or set or tuple of nodes forming a clique.

        Raises
        ------
        ValueError
            If the two cliques share no variables (no sepset exists).

        Examples
        --------
        >>> from pgmpy.models import ClusterGraph
        >>> G = ClusterGraph()
        >>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
        >>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
        ...                   (('a', 'b', 'c'), ('a', 'c'))])
        """
        set_u = set(u)
        set_v = set(v)
        if set_u.isdisjoint(set_v):
            raise ValueError('No sepset found between these two edges.')

        super(ClusterGraph, self).add_edge(u, v)

    def add_factors(self, *factors):
        """
        Associate a factor to the graph.
        See factors class for the order of potential values.

        Parameters
        ----------
        *factor: pgmpy.factors.factors object
            A factor object on any subset of the variables of the model which
            is to be associated with the model.

        Raises
        ------
        ValueError
            If a factor's scope does not exactly match one of the cliques.

        Returns
        -------
        None

        Examples
        --------
        >>> from pgmpy.models import ClusterGraph
        >>> from pgmpy.factors import Factor
        >>> student = ClusterGraph()
        >>> student.add_node(('Alice', 'Bob'))
        >>> factor = Factor(['Alice', 'Bob'], cardinality=[3, 2],
        ...                 values=np.random.rand(6))
        >>> student.add_factors(factor)
        """
        for factor in factors:
            factor_scope = set(factor.scope())
            nodes = [set(node) for node in self.nodes()]
            if factor_scope not in nodes:
                # BUGFIX: the two adjacent literals previously concatenated to
                # "...variable notpresent in model" (missing space).
                raise ValueError('Factors defined on clusters of variable not '
                                 'present in model')

            self.factors.append(factor)

    def get_factors(self, node=None):
        """
        Return the factors that have been added till now to the graph.

        If node is not None, it would return the factor corresponding to the
        given node.

        Raises
        ------
        ValueError
            If `node` is given but is not a clique of the graph.

        Examples
        --------
        >>> from pgmpy.models import ClusterGraph
        >>> from pgmpy.factors import Factor
        >>> G = ClusterGraph()
        >>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
        >>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
        ...                   (('a', 'b', 'c'), ('a', 'c'))])
        >>> phi1 = Factor(['a', 'b', 'c'], [2, 2, 2], np.random.rand(8))
        >>> phi2 = Factor(['a', 'b'], [2, 2], np.random.rand(4))
        >>> phi3 = Factor(['a', 'c'], [2, 2], np.random.rand(4))
        >>> G.add_factors(phi1, phi2, phi3)
        >>> G.get_factors()
        >>> G.get_factors(node=('a', 'b', 'c'))
        """
        if node is None:
            return self.factors
        else:
            nodes = [set(n) for n in self.nodes()]

            if set(node) not in nodes:
                raise ValueError('Node not present in Cluster Graph')

            # Return the first factor whose scope matches the clique exactly.
            factors = filter(lambda x: set(x.scope()) == set(node), self.factors)
            return next(factors)

    def remove_factors(self, *factors):
        """
        Removes the given factors from the added factors.

        Examples
        --------
        >>> from pgmpy.models import ClusterGraph
        >>> from pgmpy.factors import Factor
        >>> student = ClusterGraph()
        >>> factor = Factor(['Alice', 'Bob'], cardinality=[2, 2],
        ...                 value=np.random.rand(4))
        >>> student.add_factors(factor)
        >>> student.remove_factors(factor)
        """
        for factor in factors:
            self.factors.remove(factor)

    def get_cardinality(self, check_cardinality=False):
        """
        Returns a dictionary with the variables of the model as keys and
        their respective cardinality as values.

        Parameters
        ----------
        check_cardinality: boolean, optional
            If check_cardinality=True it checks if cardinality information
            for all the variables is available or not. If not it raises an
            error.

        Examples
        --------
        >>> from pgmpy.models import ClusterGraph
        >>> from pgmpy.factors import Factor
        >>> student = ClusterGraph()
        >>> factor = Factor(['Alice', 'Bob'], cardinality=[2, 2],
        ...                 values=np.random.rand(4))
        >>> student.add_node(('Alice', 'Bob'))
        >>> student.add_factors(factor)
        >>> student.get_cardinality()
        defaultdict(<class 'int'>, {'Bob': 2, 'Alice': 2})
        """
        cardinalities = defaultdict(int)
        for factor in self.factors:
            for variable, cardinality in zip(factor.scope(), factor.cardinality):
                cardinalities[variable] = cardinality
        # Every variable appearing in some clique must have a known
        # cardinality, i.e. appear in at least one factor.
        if check_cardinality and len(set((x for clique in self.nodes() for x in clique))) != len(cardinalities):
            raise ValueError('Factors for all the variables not defined.')
        return cardinalities

    def get_partition_function(self):
        r"""
        Returns the partition function for a given undirected graph.

        A partition function is defined as

        .. math:: \sum_{X}(\prod_{i=1}^{m} \phi_i)

        where m is the number of factors present in the graph
        and X are all the random variables present.

        Examples
        --------
        >>> from pgmpy.models import ClusterGraph
        >>> from pgmpy.factors import Factor
        >>> G = ClusterGraph()
        >>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
        >>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
        ...                   (('a', 'b', 'c'), ('a', 'c'))])
        >>> phi1 = Factor(['a', 'b', 'c'], [2, 2, 2], np.random.rand(8))
        >>> phi2 = Factor(['a', 'b'], [2, 2], np.random.rand(4))
        >>> phi3 = Factor(['a', 'c'], [2, 2], np.random.rand(4))
        >>> G.add_factors(phi1, phi2, phi3)
        >>> G.get_partition_function()
        """
        if self.check_model():
            # Multiply every factor together, then sum out all variables.
            factor = self.factors[0]
            factor = factor_product(factor, *[self.factors[i] for i in
                                              range(1, len(self.factors))])
            return np.sum(factor.values)

    def check_model(self):
        """
        Check the model for various errors. This method checks for the
        following errors.

        * Checks if factors are defined for all the cliques or not.
        * Check for running intersection property is not done explicitly over
          here as it done in the add_edges method.
        * Check if cardinality of random variable remains same across all the
          factors.

        Returns
        -------
        check: boolean
            True if all the checks are passed
        """
        for clique in self.nodes():
            factors = filter(lambda x: set(x.scope()) == set(clique), self.factors)
            if not any(factors):
                raise ValueError('Factors for all the cliques or clusters not defined.')

        cardinalities = self.get_cardinality()
        for factor in self.factors:
            for variable, cardinality in zip(factor.scope(), factor.cardinality):
                if (cardinalities[variable] != cardinality):
                    raise ValueError(
                        'Cardinality of variable {var} not matching among factors'.format(var=variable))
        return True

    def copy(self):
        """
        Returns a copy of ClusterGraph.

        Returns
        -------
        ClusterGraph: copy of ClusterGraph

        Examples
        --------
        >>> G = ClusterGraph()
        >>> G.add_nodes_from([('a', 'b'), ('b', 'c')])
        >>> G.add_edge(('a', 'b'), ('b', 'c'))
        >>> phi1 = Factor(['a', 'b'], [2, 2], np.random.rand(4))
        >>> phi2 = Factor(['b', 'c'], [2, 2], np.random.rand(4))
        >>> G.add_factors(phi1, phi2)
        >>> graph_copy = G.copy()
        >>> graph_copy.factors
        [<Factor representing phi(a:2, b:2) at 0xb71b19cc>,
         <Factor representing phi(b:2, c:2) at 0xb4eaf3ac>]
        >>> graph_copy.edges()
        [(('a', 'b'), ('b', 'c'))]
        >>> graph_copy.nodes()
        [('a', 'b'), ('b', 'c')]
        """
        # Copy structure first, then deep-copy the factors so mutating one
        # graph's potentials cannot affect the other.
        copy = ClusterGraph(self.edges())
        if self.factors:
            factors_copy = [factor.copy() for factor in self.factors]
            copy.add_factors(*factors_copy)
        return copy
sixohsix/twitter
refs/heads/master
twitter/ircbot.py
19
""" twitterbot A twitter IRC bot. Twitterbot connected to an IRC server and idles in a channel, polling a twitter account and broadcasting all updates to friends. USAGE twitterbot [config_file] CONFIG_FILE The config file is an ini-style file that must contain the following: [irc] server: <irc_server> port: <irc_port> nick: <irc_nickname> channel: <irc_channels_to_join> prefixes: <prefix_type> [twitter] oauth_token_file: <oauth_token_filename> If no config file is given "twitterbot.ini" will be used by default. The channel argument can accept multiple channels separated by commas. The default token file is ~/.twitterbot_oauth. The default prefix type is 'cats'. You can also use 'none'. """ from __future__ import print_function BOT_VERSION = "TwitterBot 1.9.1 (http://mike.verdone.ca/twitter)" CONSUMER_KEY = "XryIxN3J2ACaJs50EizfLQ" CONSUMER_SECRET = "j7IuDCNjftVY8DBauRdqXs4jDl5Fgk1IJRag8iE" IRC_BOLD = chr(0x02) IRC_ITALIC = chr(0x16) IRC_UNDERLINE = chr(0x1f) IRC_REGULAR = chr(0x0f) import sys import time from datetime import datetime, timedelta from email.utils import parsedate try: from configparser import ConfigParser except ImportError: from ConfigParser import ConfigParser from heapq import heappop, heappush import traceback import os import os.path from .api import Twitter, TwitterError from .oauth import OAuth, read_token_file from .oauth_dance import oauth_dance from .util import htmlentitydecode PREFIXES = dict( cats=dict( new_tweet="=^_^= ", error="=O_o= ", inform="=o_o= " ), none=dict( new_tweet="" ), ) ACTIVE_PREFIXES=dict() def get_prefix(prefix_typ=None): return ACTIVE_PREFIXES.get(prefix_typ, ACTIVE_PREFIXES.get('new_tweet', '')) try: import irclib except ImportError: raise ImportError( "This module requires python irclib available from " + "https://github.com/sixohsix/python-irclib/zipball/python-irclib3-0.4.8") OAUTH_FILE = os.environ.get('HOME', os.environ.get('USERPROFILE', '')) + os.sep + '.twitterbot_oauth' def debug(msg): # uncomment this for 
debug text stuff # print(msg, file=sys.stdout) pass class SchedTask(object): def __init__(self, task, delta): self.task = task self.delta = delta self.next = time.time() def __repr__(self): return "<SchedTask %s next:%i delta:%i>" %( self.task.__name__, self.__next__, self.delta) def __lt__(self, other): return self.next < other.next def __call__(self): return self.task() class Scheduler(object): def __init__(self, tasks): self.task_heap = [] for task in tasks: heappush(self.task_heap, task) def next_task(self): now = time.time() task = heappop(self.task_heap) wait = task.next - now task.next = now + task.delta heappush(self.task_heap, task) if (wait > 0): time.sleep(wait) task() #debug("tasks: " + str(self.task_heap)) def run_forever(self): while True: self.next_task() class TwitterBot(object): def __init__(self, configFilename): self.configFilename = configFilename self.config = load_config(self.configFilename) global ACTIVE_PREFIXES ACTIVE_PREFIXES = PREFIXES[self.config.get('irc', 'prefixes')] oauth_file = self.config.get('twitter', 'oauth_token_file') if not os.path.exists(oauth_file): oauth_dance("IRC Bot", CONSUMER_KEY, CONSUMER_SECRET, oauth_file) oauth_token, oauth_secret = read_token_file(oauth_file) self.twitter = Twitter( auth=OAuth( oauth_token, oauth_secret, CONSUMER_KEY, CONSUMER_SECRET), domain='api.twitter.com') self.irc = irclib.IRC() self.irc.add_global_handler('privmsg', self.handle_privmsg) self.irc.add_global_handler('ctcp', self.handle_ctcp) self.irc.add_global_handler('umode', self.handle_umode) self.ircServer = self.irc.server() self.sched = Scheduler( (SchedTask(self.process_events, 1), SchedTask(self.check_statuses, 120))) self.lastUpdate = (datetime.utcnow() - timedelta(minutes=10)).utctimetuple() def check_statuses(self): debug("In check_statuses") try: updates = reversed(self.twitter.statuses.home_timeline()) except Exception as e: print("Exception while querying twitter:", file=sys.stderr) traceback.print_exc(file=sys.stderr) return 
nextLastUpdate = self.lastUpdate for update in updates: crt = parsedate(update['created_at']) if (crt > nextLastUpdate): text = (htmlentitydecode( update['text'].replace('\n', ' ')) .encode('utf8', 'replace')) # Skip updates beginning with @ # TODO This would be better if we only ignored messages # to people who are not on our following list. if not text.startswith(b"@"): msg = "%s %s%s%s %s" %( get_prefix(), IRC_BOLD, update['user']['screen_name'], IRC_BOLD, text.decode('utf8')) self.privmsg_channels(msg) nextLastUpdate = crt self.lastUpdate = nextLastUpdate def process_events(self): self.irc.process_once() def handle_privmsg(self, conn, evt): debug('got privmsg') args = evt.arguments()[0].split(' ') try: if (not args): return if (args[0] == 'follow' and args[1:]): self.follow(conn, evt, args[1]) elif (args[0] == 'unfollow' and args[1:]): self.unfollow(conn, evt, args[1]) else: conn.privmsg( evt.source().split('!')[0], "%sHi! I'm Twitterbot! you can (follow " "<twitter_name>) to make me follow a user or " "(unfollow <twitter_name>) to make me stop." % get_prefix()) except Exception: traceback.print_exc(file=sys.stderr) def handle_ctcp(self, conn, evt): args = evt.arguments() source = evt.source().split('!')[0] if (args): if args[0] == 'VERSION': conn.ctcp_reply(source, "VERSION " + BOT_VERSION) elif args[0] == 'PING': conn.ctcp_reply(source, "PING") elif args[0] == 'CLIENTINFO': conn.ctcp_reply(source, "CLIENTINFO PING VERSION CLIENTINFO") def handle_umode(self, conn, evt): """ QuakeNet ignores all your commands until after the MOTD. This handler defers joining until after it sees a magic line. It also tries to join right after connect, but this will just make it join again which should be safe. 
""" args = evt.arguments() if (args and args[0] == '+i'): channels = self.config.get('irc', 'channel').split(',') for channel in channels: self.ircServer.join(channel) def privmsg_channels(self, msg): return_response=True channels=self.config.get('irc','channel').split(',') return self.ircServer.privmsg_many(channels, msg.encode('utf8')) def follow(self, conn, evt, name): userNick = evt.source().split('!')[0] friends = [x['name'] for x in self.twitter.statuses.friends()] debug("Current friends: %s" %(friends)) if (name in friends): conn.privmsg( userNick, "%sI'm already following %s." %(get_prefix('error'), name)) else: try: self.twitter.friendships.create(screen_name=name) except TwitterError: conn.privmsg( userNick, "%sI can't follow that user. Are you sure the name is correct?" %( get_prefix('error') )) return conn.privmsg( userNick, "%sOkay! I'm now following %s." %(get_prefix('followed'), name)) self.privmsg_channels( "%s%s has asked me to start following %s" %( get_prefix('inform'), userNick, name)) def unfollow(self, conn, evt, name): userNick = evt.source().split('!')[0] friends = [x['name'] for x in self.twitter.statuses.friends()] debug("Current friends: %s" %(friends)) if (name not in friends): conn.privmsg( userNick, "%sI'm not following %s." %(get_prefix('error'), name)) else: self.twitter.friendships.destroy(screen_name=name) conn.privmsg( userNick, "%sOkay! I've stopped following %s." 
%( get_prefix('stop_follow'), name)) self.privmsg_channels( "%s%s has asked me to stop following %s" %( get_prefix('inform'), userNick, name)) def _irc_connect(self): self.ircServer.connect( self.config.get('irc', 'server'), self.config.getint('irc', 'port'), self.config.get('irc', 'nick')) channels=self.config.get('irc', 'channel').split(',') for channel in channels: self.ircServer.join(channel) def run(self): self._irc_connect() while True: try: self.sched.run_forever() except KeyboardInterrupt: break except TwitterError: # twitter.com is probably down because it # sucks. ignore the fault and keep going pass except irclib.ServerNotConnectedError: # Try and reconnect to IRC. self._irc_connect() def load_config(filename): # Note: Python ConfigParser module has the worst interface in the # world. Mega gross. cp = ConfigParser() cp.add_section('irc') cp.set('irc', 'port', '6667') cp.set('irc', 'nick', 'twitterbot') cp.set('irc', 'prefixes', 'cats') cp.add_section('twitter') cp.set('twitter', 'oauth_token_file', OAUTH_FILE) cp.read((filename,)) # attempt to read these properties-- they are required cp.get('twitter', 'oauth_token_file'), cp.get('irc', 'server') cp.getint('irc', 'port') cp.get('irc', 'nick') cp.get('irc', 'channel') return cp # So there was a joke here about the twitter business model # but I got rid of it. Not because I want this codebase to # be "professional" in any way, but because someone forked # this and deleted the comment because they couldn't take # a joke. Hi guy! # # Fact: The number one use of Google Code is to look for that # comment in the Linux kernel that goes "FUCK me gently with # a chainsaw." Pretty sure Linus himself wrote it. 
def main(): configFilename = "twitterbot.ini" if (sys.argv[1:]): configFilename = sys.argv[1] try: if not os.path.exists(configFilename): raise Exception() load_config(configFilename) except Exception as e: print("Error while loading ini file %s" %( configFilename), file=sys.stderr) print(e, file=sys.stderr) print(__doc__, file=sys.stderr) sys.exit(1) bot = TwitterBot(configFilename) return bot.run()
kishoredbn/barrelfish
refs/heads/master
tools/harness/checkout.py
10
########################################################################## # Copyright (c) 2009, ETH Zurich. # All rights reserved. # # This file is distributed under the terms in the attached LICENSE file. # If you do not find this file, copies can be found by writing to: # ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group. ########################################################################## try: from mercurial import hg, ui, node, error, commands mercurial_module = True except ImportError: mercurial_module = False class Checkout: '''Checkout class: Maintain information about revision number, and directory locations ''' def __init__(self, base_dir): # Set parameters self.base_dir = base_dir if mercurial_module: try: self.repo = hg.repository(ui.ui(), base_dir) except error.RepoError: self.repo = None def get_base_dir(self): return self.base_dir def describe(self): if not mercurial_module: return '(mercurial module not available)' elif not self.repo: return '(repository information not available)' # identify the parents of the working revision context = self.repo[None] parents = context.parents() s = ', '.join(map(lambda p: node.short(p.node()), parents)) if context.files() or context.deleted(): s += ' with local changes' else: s += ' unmodified' return s def changes(self): if not mercurial_module or not self.repo: return None context = self.repo[None] if not context.files() and not context.deleted(): return None diffui = ui.ui() diffui.pushbuffer() commands.diff(diffui, self.repo, git=True) return diffui.popbuffer()
danylaksono/inasafe
refs/heads/master
realtime/sftp_client.py
5
""" InaSAFE Disaster risk assessment tool developed by AusAid and World Bank - **Ftp Client for Retrieving ftp data.** Contact : ole.moller.nielsen@gmail.com .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ __author__ = 'imajimatika@gmail.com' __version__ = '0.5.0' __date__ = '10/01/2013' __copyright__ = ('Copyright 2012, Australia Indonesia Facility for ' 'Disaster Reduction') import sys import paramiko import ntpath from stat import S_ISDIR from errno import ENOENT import os import logging from utils import mkDir # The logger is intialised in utils.py by init LOGGER = logging.getLogger('InaSAFE') my_host = '118.97.83.243' my_username = 'geospasial' try: my_password = os.environ['QUAKE_SERVER_PASSWORD'] except KeyError: LOGGER.exception('QUAKE_SERVER_PASSWORD not set!') sys.exit() my_remote_path = 'shakemaps' class SFtpClient: """A utility class that contains methods to fetch a listings and files from an SSH protocol""" def __init__(self, the_host=my_host, the_username=my_username, the_password=my_password, the_working_dir=my_remote_path): self.host = the_host self.username = the_username self.password = the_password self.working_dir = the_working_dir # create transport object self.transport = paramiko.Transport(self.host) self.transport.connect(username=self.username, password=self.password) # create sftp object self.sftp = paramiko.SFTPClient.from_transport(self.transport) # go to remote_path folder, this is the default folder if not self.working_dir is None: self.sftp.chdir(self.working_dir) self.workdir_path = self.sftp.getcwd() def download_path(self, remote_path, local_path): """ Download remote_dir to local_dir. for example : remote_path = '20130111133900' will be download to local_dir/remote_path Must be in the parent directory of remote dir. 
""" # Check if remote_dir is exist if not self.is_path_exist(remote_path): print 'remote path is not exist %s' % remote_path return False if self.is_dir(remote_path): # get directory name dir_name = get_path_tail(remote_path) # create directory in local machine local_dir_path = os.path.join(local_path, dir_name) mkDir(local_dir_path) # list all directory in remote path list_dir = self.sftp.listdir(remote_path) # iterate recursive for my_dir in list_dir: new_remote_path = os.path.join(remote_path, my_dir) self.download_path(new_remote_path, local_dir_path) else: # download file to local_path file_name = get_path_tail(remote_path) local_file_path = os.path.join(local_path, file_name) LOGGER.info('file %s will be downloaded to %s' % (remote_path, local_file_path)) self.sftp.get(remote_path, local_file_path) def is_dir(self, path): """Check if a path is a directory or not in sftp Reference: http://stackoverflow.com/a/8307575/1198772 """ try: return S_ISDIR(self.sftp.stat(path).st_mode) except IOError: #Path does not exist, so by definition not a directory return False def is_path_exist(self, path): """os.path.exists for paramiko's SCP object Reference: http://stackoverflow.com/q/850749/1198772 """ try: self.sftp.stat(path) except IOError, e: if e.errno == ENOENT: return False raise else: return True def getListing(self, remote_dir=None, my_func=None): """Return list of files and directories name under a remote_dir and return true when it is input to my_func """ if remote_dir is None: remote_dir = self.workdir_path if self.is_path_exist(remote_dir): temp_list = self.sftp.listdir(remote_dir) else: LOGGER.debug('Directory %s is not exist, return None' % remote_dir) return None retval = [] for my_temp in temp_list: if my_func(my_temp): retval.append(my_temp) return retval def get_path_tail(path): '''Return tail of a path Reference : http://stackoverflow.com/a/8384788/1198772 ''' head, tail = ntpath.split(path) return tail or ntpath.basename(head)
mrfuxi/django
refs/heads/master
tests/project_template/test_settings.py
274
import unittest from django.test import TestCase from django.utils import six @unittest.skipIf(six.PY2, 'Python 2 cannot import the project template because ' 'django/conf/project_template doesn\'t have an __init__.py file.') class TestStartProjectSettings(TestCase): def test_middleware_classes_headers(self): """ Ensure headers sent by the default MIDDLEWARE_CLASSES do not inadvertently change. For example, we never want "Vary: Cookie" to appear in the list since it prevents the caching of responses. """ from django.conf.project_template.project_name.settings import MIDDLEWARE_CLASSES with self.settings( MIDDLEWARE_CLASSES=MIDDLEWARE_CLASSES, ROOT_URLCONF='project_template.urls', ): response = self.client.get('/empty/') headers = sorted(response.serialize_headers().split(b'\r\n')) self.assertEqual(headers, [ b'Content-Type: text/html; charset=utf-8', b'X-Frame-Options: SAMEORIGIN', ])
flx2015/ns-3-dev-git
refs/heads/master
src/lte/bindings/modulegen__gcc_ILP32.py
14
null
MattDevo/edk2
refs/heads/master
AppPkg/Applications/Python/Python-2.7.2/Lib/test/tf_inherit_check.py
14
# Helper script for test_tempfile.py. argv[2] is the number of a file # descriptor which should _not_ be open. Check this by attempting to # write to it -- if we succeed, something is wrong. import sys import os verbose = (sys.argv[1] == 'v') try: fd = int(sys.argv[2]) try: os.write(fd, "blat") except os.error: # Success -- could not write to fd. sys.exit(0) else: if verbose: sys.stderr.write("fd %d is open in child" % fd) sys.exit(1) except StandardError: if verbose: raise sys.exit(1)
lulandco/SickRage
refs/heads/develop
lib/babelfish/converters/name.py
88
# -*- coding: utf-8 -*- # # Copyright (c) 2013 the BabelFish authors. All rights reserved. # Use of this source code is governed by the 3-clause BSD license # that can be found in the LICENSE file. # from __future__ import unicode_literals from . import LanguageEquivalenceConverter from ..language import LANGUAGE_MATRIX class NameConverter(LanguageEquivalenceConverter): CASE_SENSITIVE = False SYMBOLS = {} for iso_language in LANGUAGE_MATRIX: if iso_language.name: SYMBOLS[iso_language.alpha3] = iso_language.name
szibis/Diamond
refs/heads/master
src/collectors/ksm/ksm.py
69
# coding=utf-8 """ This class collects 'Kernel Samepage Merging' statistics. KSM is a memory de-duplication feature of the Linux Kernel (2.6.32+). It can be enabled, if compiled into your kernel, by echoing 1 to /sys/kernel/mm/ksm/run. You can find more information about KSM at [http://www.linux-kvm.org/page/KSM](http://www.linux-kvm.org/page/KSM). #### Dependencies * KSM built into your kernel. It does not have to be enabled, but the stats will be less than useful if it isn't:-) """ import os import glob import diamond.collector class KSMCollector(diamond.collector.Collector): def get_default_config_help(self): config_help = super(KSMCollector, self).get_default_config_help() config_help.update({ 'ksm_path': "location where KSM kernel data can be found", }) return config_help def get_default_config(self): """ Return default config. path: Graphite path output ksm_path: location where KSM kernel data can be found """ config = super(KSMCollector, self).get_default_config() config.update({ 'path': 'ksm', 'ksm_path': '/sys/kernel/mm/ksm'}) return config def collect(self): for item in glob.glob(os.path.join(self.config['ksm_path'], "*")): if os.access(item, os.R_OK): filehandle = open(item) try: self.publish(os.path.basename(item), float(filehandle.readline().rstrip())) except ValueError: pass filehandle.close()
MFoster/breeze
refs/heads/master
django/contrib/sessions/tests.py
28
from datetime import timedelta import os import shutil import string import tempfile import warnings from django.conf import settings from django.contrib.sessions.backends.db import SessionStore as DatabaseSession from django.contrib.sessions.backends.cache import SessionStore as CacheSession from django.contrib.sessions.backends.cached_db import SessionStore as CacheDBSession from django.contrib.sessions.backends.file import SessionStore as FileSession from django.contrib.sessions.backends.signed_cookies import SessionStore as CookieSession from django.contrib.sessions.models import Session from django.contrib.sessions.middleware import SessionMiddleware from django.core.cache import get_cache from django.core import management from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation from django.http import HttpResponse from django.test import TestCase, RequestFactory from django.test.utils import override_settings from django.utils import six from django.utils import timezone from django.utils import unittest class SessionTestsMixin(object): # This does not inherit from TestCase to avoid any tests being run with this # class, which wouldn't work, and to allow different TestCase subclasses to # be used. 
backend = None # subclasses must specify def setUp(self): self.session = self.backend() def tearDown(self): # NB: be careful to delete any sessions created; stale sessions fill up # the /tmp (with some backends) and eventually overwhelm it after lots # of runs (think buildbots) self.session.delete() def test_new_session(self): self.assertFalse(self.session.modified) self.assertFalse(self.session.accessed) def test_get_empty(self): self.assertEqual(self.session.get('cat'), None) def test_store(self): self.session['cat'] = "dog" self.assertTrue(self.session.modified) self.assertEqual(self.session.pop('cat'), 'dog') def test_pop(self): self.session['some key'] = 'exists' # Need to reset these to pretend we haven't accessed it: self.accessed = False self.modified = False self.assertEqual(self.session.pop('some key'), 'exists') self.assertTrue(self.session.accessed) self.assertTrue(self.session.modified) self.assertEqual(self.session.get('some key'), None) def test_pop_default(self): self.assertEqual(self.session.pop('some key', 'does not exist'), 'does not exist') self.assertTrue(self.session.accessed) self.assertFalse(self.session.modified) def test_setdefault(self): self.assertEqual(self.session.setdefault('foo', 'bar'), 'bar') self.assertEqual(self.session.setdefault('foo', 'baz'), 'bar') self.assertTrue(self.session.accessed) self.assertTrue(self.session.modified) def test_update(self): self.session.update({'update key': 1}) self.assertTrue(self.session.accessed) self.assertTrue(self.session.modified) self.assertEqual(self.session.get('update key', None), 1) def test_has_key(self): self.session['some key'] = 1 self.session.modified = False self.session.accessed = False self.assertIn('some key', self.session) self.assertTrue(self.session.accessed) self.assertFalse(self.session.modified) def test_values(self): self.assertEqual(list(self.session.values()), []) self.assertTrue(self.session.accessed) self.session['some key'] = 1 
self.assertEqual(list(self.session.values()), [1]) def test_iterkeys(self): self.session['x'] = 1 self.session.modified = False self.session.accessed = False i = six.iterkeys(self.session) self.assertTrue(hasattr(i, '__iter__')) self.assertTrue(self.session.accessed) self.assertFalse(self.session.modified) self.assertEqual(list(i), ['x']) def test_itervalues(self): self.session['x'] = 1 self.session.modified = False self.session.accessed = False i = six.itervalues(self.session) self.assertTrue(hasattr(i, '__iter__')) self.assertTrue(self.session.accessed) self.assertFalse(self.session.modified) self.assertEqual(list(i), [1]) def test_iteritems(self): self.session['x'] = 1 self.session.modified = False self.session.accessed = False i = six.iteritems(self.session) self.assertTrue(hasattr(i, '__iter__')) self.assertTrue(self.session.accessed) self.assertFalse(self.session.modified) self.assertEqual(list(i), [('x', 1)]) def test_clear(self): self.session['x'] = 1 self.session.modified = False self.session.accessed = False self.assertEqual(list(self.session.items()), [('x', 1)]) self.session.clear() self.assertEqual(list(self.session.items()), []) self.assertTrue(self.session.accessed) self.assertTrue(self.session.modified) def test_save(self): if (hasattr(self.session, '_cache') and'DummyCache' in settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND']): raise unittest.SkipTest("Session saving tests require a real cache backend") self.session.save() self.assertTrue(self.session.exists(self.session.session_key)) def test_delete(self): self.session.save() self.session.delete(self.session.session_key) self.assertFalse(self.session.exists(self.session.session_key)) def test_flush(self): self.session['foo'] = 'bar' self.session.save() prev_key = self.session.session_key self.session.flush() self.assertFalse(self.session.exists(prev_key)) self.assertNotEqual(self.session.session_key, prev_key) self.assertTrue(self.session.modified) self.assertTrue(self.session.accessed) def 
test_cycle(self): self.session['a'], self.session['b'] = 'c', 'd' self.session.save() prev_key = self.session.session_key prev_data = list(self.session.items()) self.session.cycle_key() self.assertNotEqual(self.session.session_key, prev_key) self.assertEqual(list(self.session.items()), prev_data) def test_invalid_key(self): # Submitting an invalid session key (either by guessing, or if the db has # removed the key) results in a new key being generated. try: session = self.backend('1') try: session.save() except AttributeError: self.fail("The session object did not save properly. Middleware may be saving cache items without namespaces.") self.assertNotEqual(session.session_key, '1') self.assertEqual(session.get('cat'), None) session.delete() finally: # Some backends leave a stale cache entry for the invalid # session key; make sure that entry is manually deleted session.delete('1') def test_session_key_is_read_only(self): def set_session_key(session): session.session_key = session._get_new_session_key() self.assertRaises(AttributeError, set_session_key, self.session) # Custom session expiry def test_default_expiry(self): # A normal session has a max age equal to settings self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE) # So does a custom session with an idle expiration time of 0 (but it'll # expire at browser close) self.session.set_expiry(0) self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE) def test_custom_expiry_seconds(self): modification = timezone.now() self.session.set_expiry(10) date = self.session.get_expiry_date(modification=modification) self.assertEqual(date, modification + timedelta(seconds=10)) age = self.session.get_expiry_age(modification=modification) self.assertEqual(age, 10) def test_custom_expiry_timedelta(self): modification = timezone.now() # Mock timezone.now, because set_expiry calls it on this code path. 
original_now = timezone.now try: timezone.now = lambda: modification self.session.set_expiry(timedelta(seconds=10)) finally: timezone.now = original_now date = self.session.get_expiry_date(modification=modification) self.assertEqual(date, modification + timedelta(seconds=10)) age = self.session.get_expiry_age(modification=modification) self.assertEqual(age, 10) def test_custom_expiry_datetime(self): modification = timezone.now() self.session.set_expiry(modification + timedelta(seconds=10)) date = self.session.get_expiry_date(modification=modification) self.assertEqual(date, modification + timedelta(seconds=10)) age = self.session.get_expiry_age(modification=modification) self.assertEqual(age, 10) def test_custom_expiry_reset(self): self.session.set_expiry(None) self.session.set_expiry(10) self.session.set_expiry(None) self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE) def test_get_expire_at_browser_close(self): # Tests get_expire_at_browser_close with different settings and different # set_expiry calls with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False): self.session.set_expiry(10) self.assertFalse(self.session.get_expire_at_browser_close()) self.session.set_expiry(0) self.assertTrue(self.session.get_expire_at_browser_close()) self.session.set_expiry(None) self.assertFalse(self.session.get_expire_at_browser_close()) with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=True): self.session.set_expiry(10) self.assertFalse(self.session.get_expire_at_browser_close()) self.session.set_expiry(0) self.assertTrue(self.session.get_expire_at_browser_close()) self.session.set_expiry(None) self.assertTrue(self.session.get_expire_at_browser_close()) def test_decode(self): # Ensure we can decode what we encode data = {'a test key': 'a test value'} encoded = self.session.encode(data) self.assertEqual(self.session.decode(encoded), data) def test_actual_expiry(self): # Regression test for #19200 old_session_key = None new_session_key = None try: 
self.session['foo'] = 'bar' self.session.set_expiry(-timedelta(seconds=10)) self.session.save() old_session_key = self.session.session_key # With an expiry date in the past, the session expires instantly. new_session = self.backend(self.session.session_key) new_session_key = new_session.session_key self.assertNotIn('foo', new_session) finally: self.session.delete(old_session_key) self.session.delete(new_session_key) class DatabaseSessionTests(SessionTestsMixin, TestCase): backend = DatabaseSession def test_session_get_decoded(self): """ Test we can use Session.get_decoded to retrieve data stored in normal way """ self.session['x'] = 1 self.session.save() s = Session.objects.get(session_key=self.session.session_key) self.assertEqual(s.get_decoded(), {'x': 1}) def test_sessionmanager_save(self): """ Test SessionManager.save method """ # Create a session self.session['y'] = 1 self.session.save() s = Session.objects.get(session_key=self.session.session_key) # Change it Session.objects.save(s.session_key, {'y': 2}, s.expire_date) # Clear cache, so that it will be retrieved from DB del self.session._session_cache self.assertEqual(self.session['y'], 2) @override_settings(SESSION_ENGINE="django.contrib.sessions.backends.db") def test_clearsessions_command(self): """ Test clearsessions command for clearing expired sessions. """ self.assertEqual(0, Session.objects.count()) # One object in the future self.session['foo'] = 'bar' self.session.set_expiry(3600) self.session.save() # One object in the past other_session = self.backend() other_session['foo'] = 'bar' other_session.set_expiry(-3600) other_session.save() # Two sessions are in the database before clearsessions... self.assertEqual(2, Session.objects.count()) management.call_command('clearsessions') # ... and one is deleted. 
self.assertEqual(1, Session.objects.count()) @override_settings(USE_TZ=True) class DatabaseSessionWithTimeZoneTests(DatabaseSessionTests): pass class CacheDBSessionTests(SessionTestsMixin, TestCase): backend = CacheDBSession @unittest.skipIf('DummyCache' in settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND'], "Session saving tests require a real cache backend") def test_exists_searches_cache_first(self): self.session.save() with self.assertNumQueries(0): self.assertTrue(self.session.exists(self.session.session_key)) def test_load_overlong_key(self): # Some backends might issue a warning with warnings.catch_warnings(): warnings.simplefilter("ignore") self.session._session_key = (string.ascii_letters + string.digits) * 20 self.assertEqual(self.session.load(), {}) @override_settings(USE_TZ=True) class CacheDBSessionWithTimeZoneTests(CacheDBSessionTests): pass # Don't need DB flushing for these tests, so can use unittest.TestCase as base class class FileSessionTests(SessionTestsMixin, unittest.TestCase): backend = FileSession def setUp(self): # Do file session tests in an isolated directory, and kill it after we're done. 
self.original_session_file_path = settings.SESSION_FILE_PATH self.temp_session_store = settings.SESSION_FILE_PATH = tempfile.mkdtemp() # Reset the file session backend's internal caches if hasattr(self.backend, '_storage_path'): del self.backend._storage_path super(FileSessionTests, self).setUp() def tearDown(self): super(FileSessionTests, self).tearDown() settings.SESSION_FILE_PATH = self.original_session_file_path shutil.rmtree(self.temp_session_store) @override_settings( SESSION_FILE_PATH="/if/this/directory/exists/you/have/a/weird/computer") def test_configuration_check(self): del self.backend._storage_path # Make sure the file backend checks for a good storage dir self.assertRaises(ImproperlyConfigured, self.backend) def test_invalid_key_backslash(self): # Ensure we don't allow directory-traversal self.assertRaises(SuspiciousOperation, self.backend("a\\b\\c").load) def test_invalid_key_forwardslash(self): # Ensure we don't allow directory-traversal self.assertRaises(SuspiciousOperation, self.backend("a/b/c").load) @override_settings(SESSION_ENGINE="django.contrib.sessions.backends.file") def test_clearsessions_command(self): """ Test clearsessions command for clearing expired sessions. """ storage_path = self.backend._get_storage_path() file_prefix = settings.SESSION_COOKIE_NAME def count_sessions(): return len([session_file for session_file in os.listdir(storage_path) if session_file.startswith(file_prefix)]) self.assertEqual(0, count_sessions()) # One object in the future self.session['foo'] = 'bar' self.session.set_expiry(3600) self.session.save() # One object in the past other_session = self.backend() other_session['foo'] = 'bar' other_session.set_expiry(-3600) other_session.save() # Two sessions are in the filesystem before clearsessions... self.assertEqual(2, count_sessions()) management.call_command('clearsessions') # ... and one is deleted. 
self.assertEqual(1, count_sessions()) class CacheSessionTests(SessionTestsMixin, unittest.TestCase): backend = CacheSession def test_load_overlong_key(self): # Some backends might issue a warning with warnings.catch_warnings(): warnings.simplefilter("ignore") self.session._session_key = (string.ascii_letters + string.digits) * 20 self.assertEqual(self.session.load(), {}) def test_default_cache(self): self.session.save() self.assertNotEqual(get_cache('default').get(self.session.cache_key), None) @override_settings(CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.dummy.DummyCache', }, 'sessions': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, }, SESSION_CACHE_ALIAS='sessions') def test_non_default_cache(self): self.session.save() self.assertEqual(get_cache('default').get(self.session.cache_key), None) self.assertNotEqual(get_cache('sessions').get(self.session.cache_key), None) class SessionMiddlewareTests(unittest.TestCase): @override_settings(SESSION_COOKIE_SECURE=True) def test_secure_session_cookie(self): request = RequestFactory().get('/') response = HttpResponse('Session test') middleware = SessionMiddleware() # Simulate a request the modifies the session middleware.process_request(request) request.session['hello'] = 'world' # Handle the response through the middleware response = middleware.process_response(request, response) self.assertTrue( response.cookies[settings.SESSION_COOKIE_NAME]['secure']) @override_settings(SESSION_COOKIE_HTTPONLY=True) def test_httponly_session_cookie(self): request = RequestFactory().get('/') response = HttpResponse('Session test') middleware = SessionMiddleware() # Simulate a request the modifies the session middleware.process_request(request) request.session['hello'] = 'world' # Handle the response through the middleware response = middleware.process_response(request, response) self.assertTrue( response.cookies[settings.SESSION_COOKIE_NAME]['httponly']) self.assertIn('httponly', 
str(response.cookies[settings.SESSION_COOKIE_NAME])) @override_settings(SESSION_COOKIE_HTTPONLY=False) def test_no_httponly_session_cookie(self): request = RequestFactory().get('/') response = HttpResponse('Session test') middleware = SessionMiddleware() # Simulate a request the modifies the session middleware.process_request(request) request.session['hello'] = 'world' # Handle the response through the middleware response = middleware.process_response(request, response) self.assertFalse(response.cookies[settings.SESSION_COOKIE_NAME]['httponly']) self.assertNotIn('httponly', str(response.cookies[settings.SESSION_COOKIE_NAME])) def test_session_save_on_500(self): request = RequestFactory().get('/') response = HttpResponse('Horrible error') response.status_code = 500 middleware = SessionMiddleware() # Simulate a request the modifies the session middleware.process_request(request) request.session['hello'] = 'world' # Handle the response through the middleware response = middleware.process_response(request, response) # Check that the value wasn't saved above. self.assertNotIn('hello', request.session.load()) class CookieSessionTests(SessionTestsMixin, TestCase): backend = CookieSession def test_save(self): """ This test tested exists() in the other session backends, but that doesn't make sense for us. """ pass def test_cycle(self): """ This test tested cycle_key() which would create a new session key for the same session data. But we can't invalidate previously signed cookies (other than letting them expire naturally) so testing for this behavior is meaningless. """ pass @unittest.expectedFailure def test_actual_expiry(self): # The cookie backend doesn't handle non-default expiry dates, see #19201 super(CookieSessionTests, self).test_actual_expiry()
Emilgardis/falloutsnip
refs/heads/develop
Vendor/IronPython/Lib/sre_constants.py
349
# # Secret Labs' Regular Expression Engine # # various symbols used by the regular expression engine. # run this script to update the _sre include files! # # Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. # # See the sre.py file for information on usage and redistribution. # """Internal support module for sre""" # update when constants are added or removed MAGIC = 20031017 # max code word in this release MAXREPEAT = 65535 # SRE standard exception (access as sre.error) # should this really be here? class error(Exception): pass # operators FAILURE = "failure" SUCCESS = "success" ANY = "any" ANY_ALL = "any_all" ASSERT = "assert" ASSERT_NOT = "assert_not" AT = "at" BIGCHARSET = "bigcharset" BRANCH = "branch" CALL = "call" CATEGORY = "category" CHARSET = "charset" GROUPREF = "groupref" GROUPREF_IGNORE = "groupref_ignore" GROUPREF_EXISTS = "groupref_exists" IN = "in" IN_IGNORE = "in_ignore" INFO = "info" JUMP = "jump" LITERAL = "literal" LITERAL_IGNORE = "literal_ignore" MARK = "mark" MAX_REPEAT = "max_repeat" MAX_UNTIL = "max_until" MIN_REPEAT = "min_repeat" MIN_UNTIL = "min_until" NEGATE = "negate" NOT_LITERAL = "not_literal" NOT_LITERAL_IGNORE = "not_literal_ignore" RANGE = "range" REPEAT = "repeat" REPEAT_ONE = "repeat_one" SUBPATTERN = "subpattern" MIN_REPEAT_ONE = "min_repeat_one" # positions AT_BEGINNING = "at_beginning" AT_BEGINNING_LINE = "at_beginning_line" AT_BEGINNING_STRING = "at_beginning_string" AT_BOUNDARY = "at_boundary" AT_NON_BOUNDARY = "at_non_boundary" AT_END = "at_end" AT_END_LINE = "at_end_line" AT_END_STRING = "at_end_string" AT_LOC_BOUNDARY = "at_loc_boundary" AT_LOC_NON_BOUNDARY = "at_loc_non_boundary" AT_UNI_BOUNDARY = "at_uni_boundary" AT_UNI_NON_BOUNDARY = "at_uni_non_boundary" # categories CATEGORY_DIGIT = "category_digit" CATEGORY_NOT_DIGIT = "category_not_digit" CATEGORY_SPACE = "category_space" CATEGORY_NOT_SPACE = "category_not_space" CATEGORY_WORD = "category_word" CATEGORY_NOT_WORD = "category_not_word" 
CATEGORY_LINEBREAK = "category_linebreak" CATEGORY_NOT_LINEBREAK = "category_not_linebreak" CATEGORY_LOC_WORD = "category_loc_word" CATEGORY_LOC_NOT_WORD = "category_loc_not_word" CATEGORY_UNI_DIGIT = "category_uni_digit" CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit" CATEGORY_UNI_SPACE = "category_uni_space" CATEGORY_UNI_NOT_SPACE = "category_uni_not_space" CATEGORY_UNI_WORD = "category_uni_word" CATEGORY_UNI_NOT_WORD = "category_uni_not_word" CATEGORY_UNI_LINEBREAK = "category_uni_linebreak" CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak" OPCODES = [ # failure=0 success=1 (just because it looks better that way :-) FAILURE, SUCCESS, ANY, ANY_ALL, ASSERT, ASSERT_NOT, AT, BRANCH, CALL, CATEGORY, CHARSET, BIGCHARSET, GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE, IN, IN_IGNORE, INFO, JUMP, LITERAL, LITERAL_IGNORE, MARK, MAX_UNTIL, MIN_UNTIL, NOT_LITERAL, NOT_LITERAL_IGNORE, NEGATE, RANGE, REPEAT, REPEAT_ONE, SUBPATTERN, MIN_REPEAT_ONE ] ATCODES = [ AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY, AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING, AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY, AT_UNI_NON_BOUNDARY ] CHCODES = [ CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE, CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD, CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD, CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT, CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD, CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK, CATEGORY_UNI_NOT_LINEBREAK ] def makedict(list): d = {} i = 0 for item in list: d[item] = i i = i + 1 return d OPCODES = makedict(OPCODES) ATCODES = makedict(ATCODES) CHCODES = makedict(CHCODES) # replacement operations for "ignore case" mode OP_IGNORE = { GROUPREF: GROUPREF_IGNORE, IN: IN_IGNORE, LITERAL: LITERAL_IGNORE, NOT_LITERAL: NOT_LITERAL_IGNORE } AT_MULTILINE = { AT_BEGINNING: AT_BEGINNING_LINE, AT_END: AT_END_LINE } AT_LOCALE = { AT_BOUNDARY: AT_LOC_BOUNDARY, 
AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY } AT_UNICODE = { AT_BOUNDARY: AT_UNI_BOUNDARY, AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY } CH_LOCALE = { CATEGORY_DIGIT: CATEGORY_DIGIT, CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT, CATEGORY_SPACE: CATEGORY_SPACE, CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE, CATEGORY_WORD: CATEGORY_LOC_WORD, CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD, CATEGORY_LINEBREAK: CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK } CH_UNICODE = { CATEGORY_DIGIT: CATEGORY_UNI_DIGIT, CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT, CATEGORY_SPACE: CATEGORY_UNI_SPACE, CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE, CATEGORY_WORD: CATEGORY_UNI_WORD, CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD, CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK, CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK } # flags SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking) SRE_FLAG_IGNORECASE = 2 # case insensitive SRE_FLAG_LOCALE = 4 # honour system locale SRE_FLAG_MULTILINE = 8 # treat target as multiline string SRE_FLAG_DOTALL = 16 # treat target as a single string SRE_FLAG_UNICODE = 32 # use unicode locale SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments SRE_FLAG_DEBUG = 128 # debugging # flags for INFO primitive SRE_INFO_PREFIX = 1 # has prefix SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix) SRE_INFO_CHARSET = 4 # pattern starts with character from given set if __name__ == "__main__": def dump(f, d, prefix): items = d.items() items.sort(key=lambda a: a[1]) for k, v in items: f.write("#define %s_%s %s\n" % (prefix, k.upper(), v)) f = open("sre_constants.h", "w") f.write("""\ /* * Secret Labs' Regular Expression Engine * * regular expression matching engine * * NOTE: This file is generated by sre_constants.py. If you need * to change anything in here, edit sre_constants.py and run it. * * Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved. * * See the _sre.c file for information on usage and redistribution. 
*/ """) f.write("#define SRE_MAGIC %d\n" % MAGIC) dump(f, OPCODES, "SRE_OP") dump(f, ATCODES, "SRE") dump(f, CHCODES, "SRE") f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE) f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE) f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE) f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE) f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL) f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE) f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE) f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX) f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL) f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET) f.close() print "done"
Sutil/netanimations
refs/heads/master
node_modules/node-gyp/gyp/PRESUBMIT.py
1369
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Top-level presubmit script for GYP. See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for more details about the presubmit API built into gcl. """ PYLINT_BLACKLIST = [ # TODO: fix me. # From SCons, not done in google style. 'test/lib/TestCmd.py', 'test/lib/TestCommon.py', 'test/lib/TestGyp.py', ] PYLINT_DISABLED_WARNINGS = [ # TODO: fix me. # Many tests include modules they don't use. 'W0611', # Possible unbalanced tuple unpacking with sequence. 'W0632', # Attempting to unpack a non-sequence. 'W0633', # Include order doesn't properly include local files? 'F0401', # Some use of built-in names. 'W0622', # Some unused variables. 'W0612', # Operator not preceded/followed by space. 'C0323', 'C0322', # Unnecessary semicolon. 'W0301', # Unused argument. 'W0613', # String has no effect (docstring in wrong place). 'W0105', # map/filter on lambda could be replaced by comprehension. 'W0110', # Use of eval. 'W0123', # Comma not followed by space. 'C0324', # Access to a protected member. 'W0212', # Bad indent. 'W0311', # Line too long. 'C0301', # Undefined variable. 'E0602', # Not exception type specified. 'W0702', # No member of that name. 'E1101', # Dangerous default {}. 'W0102', # Cyclic import. 'R0401', # Others, too many to sort. 'W0201', 'W0232', 'E1103', 'W0621', 'W0108', 'W0223', 'W0231', 'R0201', 'E0101', 'C0321', # ************* Module copy # W0104:427,12:_test.odict.__setitem__: Statement seems to have no effect 'W0104', ] def CheckChangeOnUpload(input_api, output_api): report = [] report.extend(input_api.canned_checks.PanProjectChecks( input_api, output_api)) return report def CheckChangeOnCommit(input_api, output_api): report = [] # Accept any year number from 2009 to the current year. 
current_year = int(input_api.time.strftime('%Y')) allowed_years = (str(s) for s in reversed(xrange(2009, current_year + 1))) years_re = '(' + '|'.join(allowed_years) + ')' # The (c) is deprecated, but tolerate it until it's removed from all files. license = ( r'.*? Copyright (\(c\) )?%(year)s Google Inc\. All rights reserved\.\n' r'.*? Use of this source code is governed by a BSD-style license that ' r'can be\n' r'.*? found in the LICENSE file\.\n' ) % { 'year': years_re, } report.extend(input_api.canned_checks.PanProjectChecks( input_api, output_api, license_header=license)) report.extend(input_api.canned_checks.CheckTreeIsOpen( input_api, output_api, 'http://gyp-status.appspot.com/status', 'http://gyp-status.appspot.com/current')) import os import sys old_sys_path = sys.path try: sys.path = ['pylib', 'test/lib'] + sys.path blacklist = PYLINT_BLACKLIST if sys.platform == 'win32': blacklist = [os.path.normpath(x).replace('\\', '\\\\') for x in PYLINT_BLACKLIST] report.extend(input_api.canned_checks.RunPylint( input_api, output_api, black_list=blacklist, disabled_warnings=PYLINT_DISABLED_WARNINGS)) finally: sys.path = old_sys_path return report TRYBOTS = [ 'linux_try', 'mac_try', 'win_try', ] def GetPreferredTryMasters(_, change): return { 'client.gyp': { t: set(['defaulttests']) for t in TRYBOTS }, }
saydulk/horizon
refs/heads/master
openstack_dashboard/test/integration_tests/tests/test_dashboard_help_redirection.py
49
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack_dashboard.test.integration_tests import helpers class TestDashboardHelp(helpers.TestCase): def test_dashboard_help_redirection(self): """Verifies Help link redirects to the right URL.""" self.home_pg.go_to_help_page() self.home_pg.switch_window() self.assertEqual(self.CONFIG.dashboard.help_url, self.home_pg.get_url_current_page(), "help link did not redirect to the right URL") self.home_pg.close_window() self.home_pg.switch_window()
transifex/transifex-client
refs/heads/master
tests/test_processors.py
5
# -*- coding: utf-8 -*-

"""
Unit tests for processor functions.
"""

import unittest

from txclib.processors import hostname_tld_migration, hostname_ssl_migration


class TestHostname(unittest.TestCase):
    """Test for hostname processors."""

    def test_tld_migration_needed(self):
        """
        Test the tld migration of Transifex, when needed.
        """
        hostnames = [
            'http://transifex.net',
            'http://www.transifex.net',
            'https://fedora.transifex.net',
        ]
        for h in hostnames:
            hostname = hostname_tld_migration(h)
            self.assertTrue(hostname.endswith('com'))
        orig_hostname = 'http://www.transifex.net/path/'
        hostname = hostname_tld_migration(orig_hostname)
        self.assertEqual(hostname, orig_hostname.replace('net', 'com', 1))

    # Renamed from ``test_tld_migration_needed``: the original file
    # defined two methods with that same name, so this one silently
    # shadowed the first and the "migration needed" case never ran.
    def test_tld_migration_not_needed(self):
        """
        Test that unneeded tld migrations are detected correctly.
        """
        hostnames = [
            'https://www.transifex.com',
            'http://fedora.transifex.com',
            'http://www.example.net/path/'
        ]
        for h in hostnames:
            hostname = hostname_tld_migration(h)
            self.assertEqual(hostname, h)

    def test_no_scheme_specified(self):
        """
        Test that, if no scheme has been specified, the https one will
        be used.
        """
        hostname = '//transifex.net'
        hostname = hostname_ssl_migration(hostname)
        self.assertTrue(hostname.startswith('https://'))

    def test_http_replacement(self):
        """Test the replacement of http with https."""
        hostnames = [
            'http://transifex.com',
            'http://transifex.net/http/',
            'http://www.transifex.com/path/'
        ]
        for h in hostnames:
            hostname = hostname_ssl_migration(h)
            self.assertEqual(hostname[:8], 'https://')
            self.assertEqual(hostname[7:], h[6:])

    def test_no_http_replacement_needed(self):
        """Test that http will not be replaces with https, when not needed."""
        for h in ['http://example.com', 'http://example.com/http/']:
            hostname = hostname_ssl_migration(h)
            # Fixed: the original asserted ``hostname == hostname`` (a
            # tautology); the intent is that non-Transifex hosts pass
            # through unchanged.
            self.assertEqual(hostname, h)
lancezlin/ml_template_py
refs/heads/master
lib/python2.7/site-packages/IPython/core/magic.py
8
# encoding: utf-8 """Magic functions for InteractiveShell. """ from __future__ import print_function #----------------------------------------------------------------------------- # Copyright (C) 2001 Janko Hauser <jhauser@zscout.de> and # Copyright (C) 2001 Fernando Perez <fperez@colorado.edu> # Copyright (C) 2008 The IPython Development Team # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. #----------------------------------------------------------------------------- import os import re import sys import types from getopt import getopt, GetoptError from traitlets.config.configurable import Configurable from IPython.core import oinspect from IPython.core.error import UsageError from IPython.core.inputsplitter import ESC_MAGIC, ESC_MAGIC2 from decorator import decorator from IPython.utils.ipstruct import Struct from IPython.utils.process import arg_split from IPython.utils.py3compat import string_types, iteritems from IPython.utils.text import dedent from traitlets import Bool, Dict, Instance, observe from logging import error #----------------------------------------------------------------------------- # Globals #----------------------------------------------------------------------------- # A dict we'll use for each class that has magics, used as temporary storage to # pass information between the @line/cell_magic method decorators and the # @magics_class class decorator, because the method decorators have no # access to the class when they run. 
See for more details: # http://stackoverflow.com/questions/2366713/can-a-python-decorator-of-an-instance-method-access-the-class magics = dict(line={}, cell={}) magic_kinds = ('line', 'cell') magic_spec = ('line', 'cell', 'line_cell') magic_escapes = dict(line=ESC_MAGIC, cell=ESC_MAGIC2) #----------------------------------------------------------------------------- # Utility classes and functions #----------------------------------------------------------------------------- class Bunch: pass def on_off(tag): """Return an ON/OFF string for a 1/0 input. Simple utility function.""" return ['OFF','ON'][tag] def compress_dhist(dh): """Compress a directory history into a new one with at most 20 entries. Return a new list made from the first and last 10 elements of dhist after removal of duplicates. """ head, tail = dh[:-10], dh[-10:] newhead = [] done = set() for h in head: if h in done: continue newhead.append(h) done.add(h) return newhead + tail def needs_local_scope(func): """Decorator to mark magic functions which need to local scope to run.""" func.needs_local_scope = True return func #----------------------------------------------------------------------------- # Class and method decorators for registering magics #----------------------------------------------------------------------------- def magics_class(cls): """Class decorator for all subclasses of the main Magics class. Any class that subclasses Magics *must* also apply this decorator, to ensure that all the methods that have been decorated as line/cell magics get correctly registered in the class instance. This is necessary because when method decorators run, the class does not exist yet, so they temporarily store their information into a module global. Application of this class decorator copies that global data to the class instance and clears the global. Obviously, this mechanism is not thread-safe, which means that the *creation* of subclasses of Magic should only be done in a single-thread context. 
Instantiation of the classes has no restrictions. Given that these classes are typically created at IPython startup time and before user application code becomes active, in practice this should not pose any problems. """ cls.registered = True cls.magics = dict(line = magics['line'], cell = magics['cell']) magics['line'] = {} magics['cell'] = {} return cls def record_magic(dct, magic_kind, magic_name, func): """Utility function to store a function as a magic of a specific kind. Parameters ---------- dct : dict A dictionary with 'line' and 'cell' subdicts. magic_kind : str Kind of magic to be stored. magic_name : str Key to store the magic as. func : function Callable object to store. """ if magic_kind == 'line_cell': dct['line'][magic_name] = dct['cell'][magic_name] = func else: dct[magic_kind][magic_name] = func def validate_type(magic_kind): """Ensure that the given magic_kind is valid. Check that the given magic_kind is one of the accepted spec types (stored in the global `magic_spec`), raise ValueError otherwise. """ if magic_kind not in magic_spec: raise ValueError('magic_kind must be one of %s, %s given' % magic_kinds, magic_kind) # The docstrings for the decorator below will be fairly similar for the two # types (method and function), so we generate them here once and reuse the # templates below. _docstring_template = \ """Decorate the given {0} as {1} magic. The decorator can be used with or without arguments, as follows. i) without arguments: it will create a {1} magic named as the {0} being decorated:: @deco def foo(...) will create a {1} magic named `foo`. ii) with one string argument: which will be used as the actual name of the resulting magic:: @deco('bar') def foo(...) will create a {1} magic named `bar`. """ # These two are decorator factories. 
While they are conceptually very similar, # there are enough differences in the details that it's simpler to have them # written as completely standalone functions rather than trying to share code # and make a single one with convoluted logic. def _method_magic_marker(magic_kind): """Decorator factory for methods in Magics subclasses. """ validate_type(magic_kind) # This is a closure to capture the magic_kind. We could also use a class, # but it's overkill for just that one bit of state. def magic_deco(arg): call = lambda f, *a, **k: f(*a, **k) if callable(arg): # "Naked" decorator call (just @foo, no args) func = arg name = func.__name__ retval = decorator(call, func) record_magic(magics, magic_kind, name, name) elif isinstance(arg, string_types): # Decorator called with arguments (@foo('bar')) name = arg def mark(func, *a, **kw): record_magic(magics, magic_kind, name, func.__name__) return decorator(call, func) retval = mark else: raise TypeError("Decorator can only be called with " "string or function") return retval # Ensure the resulting decorator has a usable docstring magic_deco.__doc__ = _docstring_template.format('method', magic_kind) return magic_deco def _function_magic_marker(magic_kind): """Decorator factory for standalone functions. """ validate_type(magic_kind) # This is a closure to capture the magic_kind. We could also use a class, # but it's overkill for just that one bit of state. 
def magic_deco(arg): call = lambda f, *a, **k: f(*a, **k) # Find get_ipython() in the caller's namespace caller = sys._getframe(1) for ns in ['f_locals', 'f_globals', 'f_builtins']: get_ipython = getattr(caller, ns).get('get_ipython') if get_ipython is not None: break else: raise NameError('Decorator can only run in context where ' '`get_ipython` exists') ip = get_ipython() if callable(arg): # "Naked" decorator call (just @foo, no args) func = arg name = func.__name__ ip.register_magic_function(func, magic_kind, name) retval = decorator(call, func) elif isinstance(arg, string_types): # Decorator called with arguments (@foo('bar')) name = arg def mark(func, *a, **kw): ip.register_magic_function(func, magic_kind, name) return decorator(call, func) retval = mark else: raise TypeError("Decorator can only be called with " "string or function") return retval # Ensure the resulting decorator has a usable docstring ds = _docstring_template.format('function', magic_kind) ds += dedent(""" Note: this decorator can only be used in a context where IPython is already active, so that the `get_ipython()` call succeeds. You can therefore use it in your startup files loaded after IPython initializes, but *not* in the IPython configuration file itself, which is executed before IPython is fully up and running. Any file located in the `startup` subdirectory of your configuration profile will be OK in this sense. """) magic_deco.__doc__ = ds return magic_deco # Create the actual decorators for public use # These three are used to decorate methods in class definitions line_magic = _method_magic_marker('line') cell_magic = _method_magic_marker('cell') line_cell_magic = _method_magic_marker('line_cell') # These three decorate standalone functions and perform the decoration # immediately. 
They can only run where get_ipython() works register_line_magic = _function_magic_marker('line') register_cell_magic = _function_magic_marker('cell') register_line_cell_magic = _function_magic_marker('line_cell') #----------------------------------------------------------------------------- # Core Magic classes #----------------------------------------------------------------------------- class MagicsManager(Configurable): """Object that handles all magic-related functionality for IPython. """ # Non-configurable class attributes # A two-level dict, first keyed by magic type, then by magic function, and # holding the actual callable object as value. This is the dict used for # magic function dispatch magics = Dict() # A registry of the original objects that we've been given holding magics. registry = Dict() shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True) auto_magic = Bool(True, help= "Automatically call line magics without requiring explicit % prefix" ).tag(config=True) @observe('auto_magic') def _auto_magic_changed(self, change): self.shell.automagic = change['new'] _auto_status = [ 'Automagic is OFF, % prefix IS needed for line magics.', 'Automagic is ON, % prefix IS NOT needed for line magics.'] user_magics = Instance('IPython.core.magics.UserMagics', allow_none=True) def __init__(self, shell=None, config=None, user_magics=None, **traits): super(MagicsManager, self).__init__(shell=shell, config=config, user_magics=user_magics, **traits) self.magics = dict(line={}, cell={}) # Let's add the user_magics to the registry for uniformity, so *all* # registered magic containers can be found there. self.registry[user_magics.__class__.__name__] = user_magics def auto_status(self): """Return descriptive string with automagic status.""" return self._auto_status[self.auto_magic] def lsmagic(self): """Return a dict of currently available magic functions. 
The return dict has the keys 'line' and 'cell', corresponding to the two types of magics we support. Each value is a list of names. """ return self.magics def lsmagic_docs(self, brief=False, missing=''): """Return dict of documentation of magic functions. The return dict has the keys 'line' and 'cell', corresponding to the two types of magics we support. Each value is a dict keyed by magic name whose value is the function docstring. If a docstring is unavailable, the value of `missing` is used instead. If brief is True, only the first line of each docstring will be returned. """ docs = {} for m_type in self.magics: m_docs = {} for m_name, m_func in iteritems(self.magics[m_type]): if m_func.__doc__: if brief: m_docs[m_name] = m_func.__doc__.split('\n', 1)[0] else: m_docs[m_name] = m_func.__doc__.rstrip() else: m_docs[m_name] = missing docs[m_type] = m_docs return docs def register(self, *magic_objects): """Register one or more instances of Magics. Take one or more classes or instances of classes that subclass the main `core.Magic` class, and register them with IPython to use the magic functions they provide. The registration process will then ensure that any methods that have decorated to provide line and/or cell magics will be recognized with the `%x`/`%%x` syntax as a line/cell magic respectively. If classes are given, they will be instantiated with the default constructor. If your classes need a custom constructor, you should instanitate them first and pass the instance. The provided arguments can be an arbitrary mix of classes and instances. 
Parameters ---------- magic_objects : one or more classes or instances """ # Start by validating them to ensure they have all had their magic # methods registered at the instance level for m in magic_objects: if not m.registered: raise ValueError("Class of magics %r was constructed without " "the @register_magics class decorator") if isinstance(m, type): # If we're given an uninstantiated class m = m(shell=self.shell) # Now that we have an instance, we can register it and update the # table of callables self.registry[m.__class__.__name__] = m for mtype in magic_kinds: self.magics[mtype].update(m.magics[mtype]) def register_function(self, func, magic_kind='line', magic_name=None): """Expose a standalone function as magic function for IPython. This will create an IPython magic (line, cell or both) from a standalone function. The functions should have the following signatures: * For line magics: `def f(line)` * For cell magics: `def f(line, cell)` * For a function that does both: `def f(line, cell=None)` In the latter case, the function will be called with `cell==None` when invoked as `%f`, and with cell as a string when invoked as `%%f`. Parameters ---------- func : callable Function to be registered as a magic. magic_kind : str Kind of magic, one of 'line', 'cell' or 'line_cell' magic_name : optional str If given, the name the magic will have in the IPython namespace. By default, the name of the function itself is used. """ # Create the new method in the user_magics and register it in the # global table validate_type(magic_kind) magic_name = func.__name__ if magic_name is None else magic_name setattr(self.user_magics, magic_name, func) record_magic(self.magics, magic_kind, magic_name, func) def register_alias(self, alias_name, magic_name, magic_kind='line'): """Register an alias to a magic function. The alias is an instance of :class:`MagicAlias`, which holds the name and kind of the magic it should call. 
Binding is done at call time, so if the underlying magic function is changed the alias will call the new function. Parameters ---------- alias_name : str The name of the magic to be registered. magic_name : str The name of an existing magic. magic_kind : str Kind of magic, one of 'line' or 'cell' """ # `validate_type` is too permissive, as it allows 'line_cell' # which we do not handle. if magic_kind not in magic_kinds: raise ValueError('magic_kind must be one of %s, %s given' % magic_kinds, magic_kind) alias = MagicAlias(self.shell, magic_name, magic_kind) setattr(self.user_magics, alias_name, alias) record_magic(self.magics, magic_kind, alias_name, alias) # Key base class that provides the central functionality for magics. class Magics(Configurable): """Base class for implementing magic functions. Shell functions which can be reached as %function_name. All magic functions should accept a string, which they can parse for their own needs. This can make some functions easier to type, eg `%cd ../` vs. `%cd("../")` Classes providing magic functions need to subclass this class, and they MUST: - Use the method decorators `@line_magic` and `@cell_magic` to decorate individual methods as magic functions, AND - Use the class decorator `@magics_class` to ensure that the magic methods are properly registered at the instance level upon instance initialization. See :mod:`magic_functions` for examples of actual implementation classes. """ # Dict holding all command-line options for each magic. 
options_table = None # Dict for the mapping of magic names to methods, set by class decorator magics = None # Flag to check that the class decorator was properly applied registered = False # Instance of IPython shell shell = None def __init__(self, shell=None, **kwargs): if not(self.__class__.registered): raise ValueError('Magics subclass without registration - ' 'did you forget to apply @magics_class?') if shell is not None: if hasattr(shell, 'configurables'): shell.configurables.append(self) if hasattr(shell, 'config'): kwargs.setdefault('parent', shell) self.shell = shell self.options_table = {} # The method decorators are run when the instance doesn't exist yet, so # they can only record the names of the methods they are supposed to # grab. Only now, that the instance exists, can we create the proper # mapping to bound methods. So we read the info off the original names # table and replace each method name by the actual bound method. # But we mustn't clobber the *class* mapping, in case of multiple instances. class_magics = self.magics self.magics = {} for mtype in magic_kinds: tab = self.magics[mtype] = {} cls_tab = class_magics[mtype] for magic_name, meth_name in iteritems(cls_tab): if isinstance(meth_name, string_types): # it's a method name, grab it tab[magic_name] = getattr(self, meth_name) else: # it's the real thing tab[magic_name] = meth_name # Configurable **needs** to be initiated at the end or the config # magics get screwed up. 
super(Magics, self).__init__(**kwargs) def arg_err(self,func): """Print docstring if incorrect arguments were passed""" print('Error in arguments:') print(oinspect.getdoc(func)) def format_latex(self, strng): """Format a string for latex inclusion.""" # Characters that need to be escaped for latex: escape_re = re.compile(r'(%|_|\$|#|&)',re.MULTILINE) # Magic command names as headers: cmd_name_re = re.compile(r'^(%s.*?):' % ESC_MAGIC, re.MULTILINE) # Magic commands cmd_re = re.compile(r'(?P<cmd>%s.+?\b)(?!\}\}:)' % ESC_MAGIC, re.MULTILINE) # Paragraph continue par_re = re.compile(r'\\$',re.MULTILINE) # The "\n" symbol newline_re = re.compile(r'\\n') # Now build the string for output: #strng = cmd_name_re.sub(r'\n\\texttt{\\textsl{\\large \1}}:',strng) strng = cmd_name_re.sub(r'\n\\bigskip\n\\texttt{\\textbf{ \1}}:', strng) strng = cmd_re.sub(r'\\texttt{\g<cmd>}',strng) strng = par_re.sub(r'\\\\',strng) strng = escape_re.sub(r'\\\1',strng) strng = newline_re.sub(r'\\textbackslash{}n',strng) return strng def parse_options(self, arg_str, opt_str, *long_opts, **kw): """Parse options passed to an argument string. The interface is similar to that of :func:`getopt.getopt`, but it returns a :class:`~IPython.utils.struct.Struct` with the options as keys and the stripped argument string still as a string. arg_str is quoted as a true sys.argv vector by using shlex.split. This allows us to easily expand variables, glob files, quote arguments, etc. Parameters ---------- arg_str : str The arguments to parse. opt_str : str The options specification. mode : str, default 'string' If given as 'list', the argument string is returned as a list (split on whitespace) instead of a string. list_all : bool, default False Put all option values in lists. Normally only options appearing more than once are put in a list. posix : bool, default True Whether to split the input line in POSIX mode or not, as per the conventions outlined in the :mod:`shlex` module from the standard library. 
""" # inject default options at the beginning of the input line caller = sys._getframe(1).f_code.co_name arg_str = '%s %s' % (self.options_table.get(caller,''),arg_str) mode = kw.get('mode','string') if mode not in ['string','list']: raise ValueError('incorrect mode given: %s' % mode) # Get options list_all = kw.get('list_all',0) posix = kw.get('posix', os.name == 'posix') strict = kw.get('strict', True) # Check if we have more than one argument to warrant extra processing: odict = {} # Dictionary with options args = arg_str.split() if len(args) >= 1: # If the list of inputs only has 0 or 1 thing in it, there's no # need to look for options argv = arg_split(arg_str, posix, strict) # Do regular option processing try: opts,args = getopt(argv, opt_str, long_opts) except GetoptError as e: raise UsageError('%s ( allowed: "%s" %s)' % (e.msg,opt_str, " ".join(long_opts))) for o,a in opts: if o.startswith('--'): o = o[2:] else: o = o[1:] try: odict[o].append(a) except AttributeError: odict[o] = [odict[o],a] except KeyError: if list_all: odict[o] = [a] else: odict[o] = a # Prepare opts,args for return opts = Struct(odict) if mode == 'string': args = ' '.join(args) return opts,args def default_option(self, fn, optstr): """Make an entry in the options_table for fn, with value optstr""" if fn not in self.lsmagic(): error("%s is not a magic function" % fn) self.options_table[fn] = optstr class MagicAlias(object): """An alias to another magic function. An alias is determined by its magic name and magic kind. Lookup is done at call time, so if the underlying magic changes the alias will call the new function. Use the :meth:`MagicsManager.register_alias` method or the `%alias_magic` magic function to create and register a new alias. """ def __init__(self, shell, magic_name, magic_kind): self.shell = shell self.magic_name = magic_name self.magic_kind = magic_kind self.pretty_target = '%s%s' % (magic_escapes[self.magic_kind], self.magic_name) self.__doc__ = "Alias for `%s`." 
% self.pretty_target self._in_call = False def __call__(self, *args, **kwargs): """Call the magic alias.""" fn = self.shell.find_magic(self.magic_name, self.magic_kind) if fn is None: raise UsageError("Magic `%s` not found." % self.pretty_target) # Protect against infinite recursion. if self._in_call: raise UsageError("Infinite recursion detected; " "magic aliases cannot call themselves.") self._in_call = True try: return fn(*args, **kwargs) finally: self._in_call = False
sbalde/edxplatform
refs/heads/master
common/lib/xmodule/xmodule/modulestore/mongo/base.py
11
""" Modulestore backed by Mongodb. Stores individual XModules as single documents with the following structure: { '_id': <location.as_dict>, 'metadata': <dict containing all Scope.settings fields> 'definition': <dict containing all Scope.content fields> 'definition.children': <list of all child location.to_deprecated_string()s> } """ import pymongo import sys import logging import copy import re from uuid import uuid4 from bson.son import SON from datetime import datetime from fs.osfs import OSFS from mongodb_proxy import MongoProxy, autoretry_read from path import Path as path from pytz import UTC from contracts import contract, new_contract from importlib import import_module from opaque_keys.edx.keys import UsageKey, CourseKey, AssetKey from opaque_keys.edx.locations import Location, BlockUsageLocator from opaque_keys.edx.locations import SlashSeparatedCourseKey from opaque_keys.edx.locator import CourseLocator, LibraryLocator from xblock.core import XBlock from xblock.exceptions import InvalidScopeError from xblock.fields import Scope, ScopeIds, Reference, ReferenceList, ReferenceValueDict from xblock.runtime import KvsFieldData from xmodule.assetstore import AssetMetadata, CourseAssetsFromStorage from xmodule.error_module import ErrorDescriptor from xmodule.errortracker import null_error_tracker, exc_info_to_str from xmodule.exceptions import HeartbeatFailure from xmodule.mako_module import MakoDescriptorSystem from xmodule.modulestore import ModuleStoreWriteBase, ModuleStoreEnum, BulkOperationsMixin, BulkOpsRecord from xmodule.modulestore.draft_and_published import ModuleStoreDraftAndPublished, DIRECT_ONLY_CATEGORIES from xmodule.modulestore.edit_info import EditInfoRuntimeMixin from xmodule.modulestore.exceptions import ItemNotFoundError, DuplicateCourseError, ReferentialIntegrityError from xmodule.modulestore.inheritance import InheritanceMixin, inherit_metadata, InheritanceKeyValueStore from xmodule.modulestore.xml import CourseLocationManager from 
xmodule.services import SettingsService log = logging.getLogger(__name__) new_contract('CourseKey', CourseKey) new_contract('AssetKey', AssetKey) new_contract('AssetMetadata', AssetMetadata) new_contract('long', long) new_contract('BlockUsageLocator', BlockUsageLocator) # sort order that returns DRAFT items first SORT_REVISION_FAVOR_DRAFT = ('_id.revision', pymongo.DESCENDING) # sort order that returns PUBLISHED items first SORT_REVISION_FAVOR_PUBLISHED = ('_id.revision', pymongo.ASCENDING) BLOCK_TYPES_WITH_CHILDREN = list(set( name for name, class_ in XBlock.load_classes() if getattr(class_, 'has_children', False) )) # Allow us to call _from_deprecated_(son|string) throughout the file # pylint: disable=protected-access # at module level, cache one instance of OSFS per filesystem root. _OSFS_INSTANCE = {} _DETACHED_CATEGORIES = [name for name, __ in XBlock.load_tagged_classes("detached")] class MongoRevisionKey(object): """ Key Revision constants to use for Location and Usage Keys in the Mongo modulestore Note: These values are persisted in the database, so should not be changed without migrations """ draft = 'draft' published = None class InvalidWriteError(Exception): """ Raised to indicate that writing to a particular key in the KeyValueStore is disabled """ pass class MongoKeyValueStore(InheritanceKeyValueStore): """ A KeyValueStore that maps keyed data access to one of the 3 data areas known to the MongoModuleStore (data, children, and metadata) """ def __init__(self, data, parent, children, metadata): super(MongoKeyValueStore, self).__init__() if not isinstance(data, dict): self._data = {'data': data} else: self._data = data self._parent = parent self._children = children self._metadata = metadata def get(self, key): if key.scope == Scope.children: return self._children elif key.scope == Scope.parent: return self._parent elif key.scope == Scope.settings: return self._metadata[key.field_name] elif key.scope == Scope.content: return self._data[key.field_name] 
else: raise InvalidScopeError( key, (Scope.children, Scope.parent, Scope.settings, Scope.content), ) def set(self, key, value): if key.scope == Scope.children: self._children = value elif key.scope == Scope.settings: self._metadata[key.field_name] = value elif key.scope == Scope.content: self._data[key.field_name] = value else: raise InvalidScopeError( key, (Scope.children, Scope.settings, Scope.content), ) def delete(self, key): if key.scope == Scope.children: self._children = [] elif key.scope == Scope.settings: if key.field_name in self._metadata: del self._metadata[key.field_name] elif key.scope == Scope.content: if key.field_name in self._data: del self._data[key.field_name] else: raise InvalidScopeError( key, (Scope.children, Scope.settings, Scope.content), ) def has(self, key): if key.scope in (Scope.children, Scope.parent): return True elif key.scope == Scope.settings: return key.field_name in self._metadata elif key.scope == Scope.content: return key.field_name in self._data else: return False def __repr__(self): return "MongoKeyValueStore{!r}<{!r}, {!r}>".format( (self._data, self._parent, self._children, self._metadata), self._fields, self.inherited_settings ) class CachingDescriptorSystem(MakoDescriptorSystem, EditInfoRuntimeMixin): """ A system that has a cache of module json that it will use to load modules from, with a backup of calling to the underlying modulestore for more data """ def __repr__(self): return "CachingDescriptorSystem{!r}".format(( self.modulestore, unicode(self.course_id), [unicode(key) for key in self.module_data.keys()], self.default_class, [unicode(key) for key in self.cached_metadata.keys()], )) def __init__(self, modulestore, course_key, module_data, default_class, cached_metadata, **kwargs): """ modulestore: the module store that can be used to retrieve additional modules course_key: the course for which everything in this runtime will be relative module_data: a dict mapping Location -> json that was cached from the underlying 
modulestore default_class: The default_class to use when loading an XModuleDescriptor from the module_data cached_metadata: the cache for handling inheritance computation. internal use only resources_fs: a filesystem, as per MakoDescriptorSystem error_tracker: a function that logs errors for later display to users render_template: a function for rendering templates, as per MakoDescriptorSystem """ id_manager = CourseLocationManager(course_key) kwargs.setdefault('id_reader', id_manager) kwargs.setdefault('id_generator', id_manager) super(CachingDescriptorSystem, self).__init__( field_data=None, load_item=self.load_item, **kwargs ) self.modulestore = modulestore self.module_data = module_data self.default_class = default_class # cdodge: other Systems have a course_id attribute defined. To keep things consistent, let's # define an attribute here as well, even though it's None self.course_id = course_key self.cached_metadata = cached_metadata def load_item(self, location, for_parent=None): # pylint: disable=method-hidden """ Return an XModule instance for the specified location """ assert isinstance(location, UsageKey) if location.run is None: # self.module_data is keyed on locations that have full run information. # If the supplied location is missing a run, then we will miss the cache and # incur an additional query. # TODO: make module_data a proper class that can handle this itself. 
location = location.replace(course_key=self.modulestore.fill_in_run(location.course_key)) json_data = self.module_data.get(location) if json_data is None: module = self.modulestore.get_item(location, using_descriptor_system=self) return module else: # load the module and apply the inherited metadata try: category = json_data['location']['category'] class_ = self.load_block_type(category) definition = json_data.get('definition', {}) metadata = json_data.get('metadata', {}) for old_name, new_name in getattr(class_, 'metadata_translations', {}).items(): if old_name in metadata: metadata[new_name] = metadata[old_name] del metadata[old_name] children = [ self._convert_reference_to_key(childloc) for childloc in definition.get('children', []) ] parent = None if self.cached_metadata is not None: # fish the parent out of here if it's available parent_url = self.cached_metadata.get(unicode(location), {}).get('parent', {}).get( ModuleStoreEnum.Branch.published_only if location.revision is None else ModuleStoreEnum.Branch.draft_preferred ) if parent_url: parent = self._convert_reference_to_key(parent_url) if not parent and category != 'course': # try looking it up just-in-time (but not if we're working with a root node (course). 
parent = self.modulestore.get_parent_location( as_published(location), ModuleStoreEnum.RevisionOption.published_only if location.revision is None else ModuleStoreEnum.RevisionOption.draft_preferred ) data = definition.get('data', {}) if isinstance(data, basestring): data = {'data': data} mixed_class = self.mixologist.mix(class_) if data: # empty or None means no work data = self._convert_reference_fields_to_keys(mixed_class, location.course_key, data) metadata = self._convert_reference_fields_to_keys(mixed_class, location.course_key, metadata) kvs = MongoKeyValueStore( data, parent, children, metadata, ) field_data = KvsFieldData(kvs) scope_ids = ScopeIds(None, category, location, location) module = self.construct_xblock_from_class(class_, scope_ids, field_data, for_parent=for_parent) if self.cached_metadata is not None: # parent container pointers don't differentiate between draft and non-draft # so when we do the lookup, we should do so with a non-draft location non_draft_loc = as_published(location) # Convert the serialized fields values in self.cached_metadata # to python values metadata_to_inherit = self.cached_metadata.get(unicode(non_draft_loc), {}) inherit_metadata(module, metadata_to_inherit) module._edit_info = json_data.get('edit_info') # migrate published_by and published_on if edit_info isn't present if module._edit_info is None: module._edit_info = {} raw_metadata = json_data.get('metadata', {}) # published_on was previously stored as a list of time components instead of a datetime if raw_metadata.get('published_date'): module._edit_info['published_date'] = datetime( *raw_metadata.get('published_date')[0:6] ).replace(tzinfo=UTC) module._edit_info['published_by'] = raw_metadata.get('published_by') # decache any computed pending field settings module.save() return module except Exception: # pylint: disable=broad-except log.warning("Failed to load descriptor from %s", json_data, exc_info=True) return ErrorDescriptor.from_json( json_data, self, location, 
                    error_msg=exc_info_to_str(sys.exc_info())
                )

    def _convert_reference_to_key(self, ref_string):
        """
        Convert a single serialized UsageKey string in a ReferenceField into a UsageKey.

        The run is filled in via the modulestore, since old-mongo keys may be
        serialized without one.
        """
        key = UsageKey.from_string(ref_string)
        return key.replace(run=self.modulestore.fill_in_run(key.course_key).run)

    def __setattr__(self, name, value):
        # Plain pass-through to the parent implementation; kept explicit as a
        # hook point for subclasses/mixins in the MRO.
        return super(CachingDescriptorSystem, self).__setattr__(name, value)

    def _convert_reference_fields_to_keys(self, class_, course_key, jsonfields):
        """
        Find all fields of type reference and convert the payload into UsageKeys

        :param class_: the XBlock class
        :param course_key: a CourseKey object for the given course
        :param jsonfields: a dict of the jsonified version of the fields
        """
        result = {}
        for field_name, value in jsonfields.iteritems():
            field = class_.fields.get(field_name)
            if field is None:
                # Field no longer exists on the class; drop the stored value.
                continue
            elif value is None:
                result[field_name] = value
            elif isinstance(field, Reference):
                result[field_name] = self._convert_reference_to_key(value)
            elif isinstance(field, ReferenceList):
                result[field_name] = [
                    self._convert_reference_to_key(ele) for ele in value
                ]
            elif isinstance(field, ReferenceValueDict):
                result[field_name] = {
                    key: self._convert_reference_to_key(subvalue) for key, subvalue in value.iteritems()
                }
            else:
                result[field_name] = value
        return result

    def lookup_item(self, location):
        """
        Returns the JSON payload of the xblock at location.

        Serves from the in-memory module_data cache when possible; otherwise
        fetches from the modulestore and caches the result.
        """
        try:
            json = self.module_data[location]
        except KeyError:
            json = self.modulestore._find_one(location)
            self.module_data[location] = json

        return json

    def get_edited_by(self, xblock):
        """
        See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
        """
        return xblock._edit_info.get('edited_by')

    def get_edited_on(self, xblock):
        """
        See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
        """
        return xblock._edit_info.get('edited_on')

    def get_subtree_edited_by(self, xblock):
        """
        See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
        """
        return xblock._edit_info.get('subtree_edited_by')

    def get_subtree_edited_on(self, xblock):
        """
        See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
        """
        return xblock._edit_info.get('subtree_edited_on')

    def get_published_by(self, xblock):
        """
        See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
        """
        return xblock._edit_info.get('published_by')

    def get_published_on(self, xblock):
        """
        See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
        """
        return xblock._edit_info.get('published_date')

    def applicable_aside_types(self, block):
        # "old" mongo does not support asides yet
        return []

new_contract('CachingDescriptorSystem', CachingDescriptorSystem)


# The only thing using this w/ wildcards is contentstore.mongo for asset retrieval
def location_to_query(location, wildcard=True, tag='i4x'):
    """
    Takes a Location and returns a SON object that will query for that location by subfields
    rather than subdoc.
    Fields in location that are None are ignored in the query.

    If `wildcard` is True, then a None in a location is treated as a wildcard
    query. Otherwise, it is searched for literally
    """
    query = location.to_deprecated_son(prefix='_id.', tag=tag)

    if wildcard:
        for key, value in query.items():
            # don't allow wildcards on revision, since public is set as None, so
            # its ambiguous between None as a real value versus None=wildcard
            if value is None and key != '_id.revision':
                del query[key]

    return query


def as_draft(location):
    """
    Returns the Location that is the draft for `location`
    If the location is in the DIRECT_ONLY_CATEGORIES, returns itself
    """
    if location.category in DIRECT_ONLY_CATEGORIES:
        return location
    return location.replace(revision=MongoRevisionKey.draft)


def as_published(location):
    """
    Returns the Location that is the published version for `location`
    """
    return location.replace(revision=MongoRevisionKey.published)


class MongoBulkOpsRecord(BulkOpsRecord):
    """
    Tracks whether there've been any writes per course and disables inheritance generation
    """
    def __init__(self):
        super(MongoBulkOpsRecord, self).__init__()
        # set True by any write during the bulk operation; checked when the
        # outermost operation ends to decide whether to refresh inheritance
        self.dirty = False


class MongoBulkOpsMixin(BulkOperationsMixin):
    """
    Mongo bulk operation support
    """
    _bulk_ops_record_type = MongoBulkOpsRecord

    def _start_outermost_bulk_operation(self, bulk_ops_record, course_key):
        """
        Prevent updating the meta-data inheritance cache for the given course
        """
        # ensure it starts clean
        bulk_ops_record.dirty = False

    def _end_outermost_bulk_operation(self, bulk_ops_record, structure_key):
        """
        Restart updating the meta-data inheritance cache for the given course or library.
        Refresh the meta-data inheritance cache now since it was temporarily disabled.
        """
        dirty = False
        if bulk_ops_record.dirty:
            self.refresh_cached_metadata_inheritance_tree(structure_key)
            dirty = True
            bulk_ops_record.dirty = False  # brand spanking clean now
        return dirty

    def _is_in_bulk_operation(self, course_id, ignore_case=False):
        """
        Returns whether a bulk operation is in progress for the given course.
""" return super(MongoBulkOpsMixin, self)._is_in_bulk_operation( course_id.for_branch(None), ignore_case ) class ParentLocationCache(dict): """ Dict-based object augmented with a more cache-like interface, for internal use. """ # pylint: disable=missing-docstring @contract(key=unicode) def has(self, key): return key in self @contract(key=unicode, value="BlockUsageLocator | None") def set(self, key, value): self[key] = value @contract(value="BlockUsageLocator") def delete_by_value(self, value): keys_to_delete = [k for k, v in self.iteritems() if v == value] for key in keys_to_delete: del self[key] class MongoModuleStore(ModuleStoreDraftAndPublished, ModuleStoreWriteBase, MongoBulkOpsMixin): """ A Mongodb backed ModuleStore """ # If no name is specified for the asset metadata collection, this name is used. DEFAULT_ASSET_COLLECTION_NAME = 'assetstore' # TODO (cpennington): Enable non-filesystem filestores # pylint: disable=invalid-name # pylint: disable=attribute-defined-outside-init def __init__(self, contentstore, doc_store_config, fs_root, render_template, default_class=None, error_tracker=null_error_tracker, i18n_service=None, fs_service=None, user_service=None, signal_handler=None, retry_wait_time=0.1, **kwargs): """ :param doc_store_config: must have a host, db, and collection entries. Other common entries: port, tz_aware. """ super(MongoModuleStore, self).__init__(contentstore=contentstore, **kwargs) def do_connection( db, collection, host, port=27017, tz_aware=True, user=None, password=None, asset_collection=None, **kwargs ): """ Create & open the connection, authenticate, and provide pointers to the collection """ # Remove the replicaSet parameter. kwargs.pop('replicaSet', None) self.database = MongoProxy( pymongo.database.Database( pymongo.MongoClient( host=host, port=port, tz_aware=tz_aware, document_class=dict, **kwargs ), db ), wait_time=retry_wait_time ) self.collection = self.database[collection] # Collection which stores asset metadata. 
if asset_collection is None: asset_collection = self.DEFAULT_ASSET_COLLECTION_NAME self.asset_collection = self.database[asset_collection] if user is not None and password is not None: self.database.authenticate(user, password) do_connection(**doc_store_config) # Force mongo to report errors, at the expense of performance self.collection.write_concern = {'w': 1} if default_class is not None: module_path, _, class_name = default_class.rpartition('.') class_ = getattr(import_module(module_path), class_name) self.default_class = class_ else: self.default_class = None self.fs_root = path(fs_root) self.error_tracker = error_tracker self.render_template = render_template self.i18n_service = i18n_service self.fs_service = fs_service self.user_service = user_service self._course_run_cache = {} self.signal_handler = signal_handler def close_connections(self): """ Closes any open connections to the underlying database """ self.collection.database.connection.close() def mongo_wire_version(self): """ Returns the wire version for mongo. Only used to unit tests which instrument the connection. """ self.database.connection._ensure_connected() return self.database.connection.max_wire_version def _drop_database(self): """ A destructive operation to drop the underlying database and close all connections. Intended to be used by test code for cleanup. """ # drop the assets super(MongoModuleStore, self)._drop_database() connection = self.collection.database.connection connection.drop_database(self.collection.database.proxied_object) connection.close() @autoretry_read() def fill_in_run(self, course_key): """ In mongo some course_keys are used without runs. This helper function returns a course_key with the run filled in, if the course does actually exist. 
""" if course_key.run is not None: return course_key cache_key = (course_key.org, course_key.course) if cache_key not in self._course_run_cache: matching_courses = list(self.collection.find(SON([ ('_id.tag', 'i4x'), ('_id.org', course_key.org), ('_id.course', course_key.course), ('_id.category', 'course'), ])).limit(1)) if not matching_courses: return course_key self._course_run_cache[cache_key] = matching_courses[0]['_id']['name'] return course_key.replace(run=self._course_run_cache[cache_key]) def for_branch_setting(self, location): """ Returns the Location that is for the current branch setting. """ if location.category in DIRECT_ONLY_CATEGORIES: return location.replace(revision=MongoRevisionKey.published) if self.get_branch_setting() == ModuleStoreEnum.Branch.draft_preferred: return location.replace(revision=MongoRevisionKey.draft) return location.replace(revision=MongoRevisionKey.published) def _get_parent_cache(self, branch): """ Provides a reference to one of the two branch-specific ParentLocationCaches associated with the current request (if any). 
""" if self.request_cache is not None: return self.request_cache.data.setdefault('parent-location-{}'.format(branch), ParentLocationCache()) else: return ParentLocationCache() def _compute_metadata_inheritance_tree(self, course_id): ''' Find all inheritable fields from all xblocks in the course which may define inheritable data ''' # get all collections in the course, this query should not return any leaf nodes course_id = self.fill_in_run(course_id) query = SON([ ('_id.tag', 'i4x'), ('_id.org', course_id.org), ('_id.course', course_id.course), ('_id.category', {'$in': BLOCK_TYPES_WITH_CHILDREN}) ]) # if we're only dealing in the published branch, then only get published containers if self.get_branch_setting() == ModuleStoreEnum.Branch.published_only: query['_id.revision'] = None # we just want the Location, children, and inheritable metadata record_filter = {'_id': 1, 'definition.children': 1} # just get the inheritable metadata since that is all we need for the computation # this minimizes both data pushed over the wire for field_name in InheritanceMixin.fields: record_filter['metadata.{0}'.format(field_name)] = 1 # call out to the DB resultset = self.collection.find(query, record_filter) # it's ok to keep these as deprecated strings b/c the overall cache is indexed by course_key and this # is a dictionary relative to that course results_by_url = {} root = None # now go through the results and order them by the location url for result in resultset: # manually pick it apart b/c the db has tag and we want as_published revision regardless location = as_published(Location._from_deprecated_son(result['_id'], course_id.run)) location_url = unicode(location) if location_url in results_by_url: # found either draft or live to complement the other revision # FIXME this is wrong. 
If the child was moved in draft from one parent to the other, it will # show up under both in this logic: https://openedx.atlassian.net/browse/TNL-1075 existing_children = results_by_url[location_url].get('definition', {}).get('children', []) additional_children = result.get('definition', {}).get('children', []) total_children = existing_children + additional_children # use set to get rid of duplicates. We don't care about order; so, it shouldn't matter. results_by_url[location_url].setdefault('definition', {})['children'] = set(total_children) else: results_by_url[location_url] = result if location.category == 'course': root = location_url # now traverse the tree and compute down the inherited metadata metadata_to_inherit = {} def _compute_inherited_metadata(url): """ Helper method for computing inherited metadata for a specific location url """ my_metadata = results_by_url[url].get('metadata', {}) # go through all the children and recurse, but only if we have # in the result set. Remember results will not contain leaf nodes for child in results_by_url[url].get('definition', {}).get('children', []): if child in results_by_url: new_child_metadata = copy.deepcopy(my_metadata) new_child_metadata.update(results_by_url[child].get('metadata', {})) results_by_url[child]['metadata'] = new_child_metadata metadata_to_inherit[child] = new_child_metadata _compute_inherited_metadata(child) else: # this is likely a leaf node, so let's record what metadata we need to inherit metadata_to_inherit[child] = my_metadata.copy() # WARNING: 'parent' is not part of inherited metadata, but # we're piggybacking on this recursive traversal to grab # and cache the child's parent, as a performance optimization. 
                # The 'parent' key will be popped out of the dictionary during
                # CachingDescriptorSystem.load_item
                metadata_to_inherit[child].setdefault('parent', {})[self.get_branch_setting()] = url

        if root is not None:
            _compute_inherited_metadata(root)

        return metadata_to_inherit

    def _get_cached_metadata_inheritance_tree(self, course_id, force_refresh=False):
        '''
        Compute the metadata inheritance for the course.
        '''
        tree = {}

        course_id = self.fill_in_run(course_id)
        if not force_refresh:
            # see if we are first in the request cache (if present)
            if self.request_cache is not None and unicode(course_id) in self.request_cache.data.get('metadata_inheritance', {}):
                return self.request_cache.data['metadata_inheritance'][unicode(course_id)]

            # then look in any caching subsystem (e.g. memcached)
            if self.metadata_inheritance_cache_subsystem is not None:
                tree = self.metadata_inheritance_cache_subsystem.get(unicode(course_id), {})
            else:
                logging.warning(
                    'Running MongoModuleStore without a metadata_inheritance_cache_subsystem. This is \
OK in localdev and testing environment. Not OK in production.'
                )

        if not tree:
            # if not in subsystem, or we are on force refresh, then we have to compute
            tree = self._compute_metadata_inheritance_tree(course_id)

            # now write out computed tree to caching subsystem (e.g. memcached), if available
            if self.metadata_inheritance_cache_subsystem is not None:
                self.metadata_inheritance_cache_subsystem.set(unicode(course_id), tree)

        # now populate a request_cache, if available. NOTE, we are outside of the
        # scope of the above if: statement so that after a memcache hit, it'll get
        # put into the request_cache
        if self.request_cache is not None:
            # we can't assume the 'metadatat_inheritance' part of the request cache dict has been
            # defined
            if 'metadata_inheritance' not in self.request_cache.data:
                self.request_cache.data['metadata_inheritance'] = {}
            self.request_cache.data['metadata_inheritance'][unicode(course_id)] = tree

        return tree

    def refresh_cached_metadata_inheritance_tree(self, course_id, runtime=None):
        """
        Refresh the cached metadata inheritance tree for the org/course combination
        for location

        If given a runtime, it replaces the cached_metadata in that runtime. NOTE: failure to provide
        a runtime may mean that some objects report old values for inherited data.
        """
        course_id = course_id.for_branch(None)
        if not self._is_in_bulk_operation(course_id):
            # below is done for side effects when runtime is None
            cached_metadata = self._get_cached_metadata_inheritance_tree(course_id, force_refresh=True)
            if runtime:
                runtime.cached_metadata = cached_metadata

    def _clean_item_data(self, item):
        """
        Renames the '_id' field in item to 'location'
        """
        item['location'] = item['_id']
        del item['_id']

    @autoretry_read()
    def _query_children_for_cache_children(self, course_key, items):
        """
        Generate a pymongo in query for finding the items and return the payloads
        """
        # first get non-draft in a round-trip
        query = {
            '_id': {'$in': [
                course_key.make_usage_key_from_deprecated_string(item).to_deprecated_son() for item in items
            ]}
        }
        return list(self.collection.find(query))

    def _cache_children(self, course_key, items, depth=0):
        """
        Returns a dictionary mapping Location -> item data, populated with json data
        for all descendents of items up to the specified depth.
        (0 = no descendents, 1 = children, 2 = grandchildren, etc)
        If depth is None, will load all the children.
        This will make a number of queries that is linear in the depth.
        """
        data = {}
        to_process = list(items)
        course_key = self.fill_in_run(course_key)
        parent_cache = self._get_parent_cache(self.get_branch_setting())
        # NOTE(review): precedence here is `(to_process and depth is None) or depth >= 0`;
        # the loop still terminates via the `depth == 0` break / depth decrement below,
        # but the condition reads oddly — confirm intent before restructuring.
        while to_process and depth is None or depth >= 0:
            children = []
            for item in to_process:
                self._clean_item_data(item)
                item_location = Location._from_deprecated_son(item['location'], course_key.run)
                item_children = item.get('definition', {}).get('children', [])
                children.extend(item_children)
                for item_child in item_children:
                    parent_cache.set(item_child, item_location)
                data[item_location] = item
            if depth == 0:
                break

            # Load all children by id. See
            # http://www.mongodb.org/display/DOCS/Advanced+Queries#AdvancedQueries-%24or
            # for or-query syntax
            to_process = []
            if children:
                to_process = self._query_children_for_cache_children(course_key, children)

            # If depth is None, then we just recurse until we hit all the descendents
            if depth is not None:
                depth -= 1

        return data

    @contract(
        course_key=CourseKey,
        item=dict,
        apply_cached_metadata=bool,
        using_descriptor_system="None|CachingDescriptorSystem"
    )
    def _load_item(self, course_key, item, data_cache,
                   apply_cached_metadata=True, using_descriptor_system=None, for_parent=None):
        """
        Load an XModuleDescriptor from item, using the children stored in data_cache

        Arguments:
            course_key (CourseKey): which course to load from

            item (dict): A dictionary with the following keys:
                location: The serialized UsageKey for the item to load
                data_dir (optional): The directory name to use as the root data directory for this XModule

            data_cache (dict): A dictionary mapping from UsageKeys to xblock field data
                (this is the xblock data loaded from the database)

            apply_cached_metadata (bool): Whether to use the cached metadata for inheritance
                purposes.

            using_descriptor_system (CachingDescriptorSystem): The existing CachingDescriptorSystem
                to add data to, and to load the XBlocks from.

            for_parent (:class:`XBlock`): The parent of the XBlock being loaded.
""" course_key = self.fill_in_run(course_key) location = Location._from_deprecated_son(item['location'], course_key.run) data_dir = getattr(item, 'data_dir', location.course) root = self.fs_root / data_dir resource_fs = _OSFS_INSTANCE.setdefault(root, OSFS(root, create=True)) cached_metadata = {} if apply_cached_metadata: cached_metadata = self._get_cached_metadata_inheritance_tree(course_key) if using_descriptor_system is None: services = {} if self.i18n_service: services["i18n"] = self.i18n_service if self.fs_service: services["fs"] = self.fs_service if self.user_service: services["user"] = self.user_service services["settings"] = SettingsService() if self.request_cache: services["request_cache"] = self.request_cache system = CachingDescriptorSystem( modulestore=self, course_key=course_key, module_data=data_cache, default_class=self.default_class, resources_fs=resource_fs, error_tracker=self.error_tracker, render_template=self.render_template, cached_metadata=cached_metadata, mixins=self.xblock_mixins, select=self.xblock_select, disabled_xblock_types=self.disabled_xblock_types, services=services, ) else: system = using_descriptor_system system.module_data.update(data_cache) system.cached_metadata.update(cached_metadata) return system.load_item(location, for_parent=for_parent) def _load_items(self, course_key, items, depth=0, using_descriptor_system=None, for_parent=None): """ Load a list of xmodules from the data in items, with children cached up to specified depth """ course_key = self.fill_in_run(course_key) data_cache = self._cache_children(course_key, items, depth) # if we are loading a course object, if we're not prefetching children (depth != 0) then don't # bother with the metadata inheritance return [ self._load_item( course_key, item, data_cache, using_descriptor_system=using_descriptor_system, apply_cached_metadata=self._should_apply_cached_metadata(item, depth), for_parent=for_parent, ) for item in items ] def _should_apply_cached_metadata(self, item, 
depth): """ Returns a boolean whether a particular query should trigger an application of inherited metadata onto the item """ category = item['location']['category'] apply_cached_metadata = category not in _DETACHED_CATEGORIES and \ not (category == 'course' and depth == 0) return apply_cached_metadata @autoretry_read() def get_courses(self, **kwargs): ''' Returns a list of course descriptors. This accepts an optional parameter of 'org' which will apply an efficient filter to only get courses with the specified ORG ''' course_org_filter = kwargs.get('org') if course_org_filter: course_records = self.collection.find({'_id.category': 'course', '_id.org': course_org_filter}) else: course_records = self.collection.find({'_id.category': 'course'}) base_list = sum( [ self._load_items( SlashSeparatedCourseKey(course['_id']['org'], course['_id']['course'], course['_id']['name']), [course] ) for course # I tried to add '$and': [{'_id.org': {'$ne': 'edx'}}, {'_id.course': {'$ne': 'templates'}}] # but it didn't do the right thing (it filtered all edx and all templates out) in course_records if not ( # TODO kill this course['_id']['org'] == 'edx' and course['_id']['course'] == 'templates' ) ], [] ) return [course for course in base_list if not isinstance(course, ErrorDescriptor)] def _find_one(self, location): '''Look for a given location in the collection. If the item is not present, raise ItemNotFoundError. ''' assert isinstance(location, UsageKey) item = self.collection.find_one( {'_id': location.to_deprecated_son()} ) if item is None: raise ItemNotFoundError(location) return item def make_course_key(self, org, course, run): """ Return a valid :class:`~opaque_keys.edx.keys.CourseKey` for this modulestore that matches the supplied `org`, `course`, and `run`. This key may represent a course that doesn't exist in this modulestore. 
""" return CourseLocator(org, course, run, deprecated=True) def make_course_usage_key(self, course_key): """ Return a valid :class:`~opaque_keys.edx.keys.UsageKey` for this modulestore that matches the supplied course_key. """ return BlockUsageLocator(course_key, 'course', course_key.run) def get_course(self, course_key, depth=0, **kwargs): """ Get the course with the given courseid (org/course/run) """ assert isinstance(course_key, CourseKey) course_key = self.fill_in_run(course_key) location = course_key.make_usage_key('course', course_key.run) try: return self.get_item(location, depth=depth) except ItemNotFoundError: return None def has_course(self, course_key, ignore_case=False, **kwargs): """ Returns the course_id of the course if it was found, else None Note: we return the course_id instead of a boolean here since the found course may have a different id than the given course_id when ignore_case is True. If ignore_case is True, do a case insensitive search, otherwise, do a case sensitive search """ assert isinstance(course_key, CourseKey) if isinstance(course_key, LibraryLocator): return None # Libraries require split mongo course_key = self.fill_in_run(course_key) location = course_key.make_usage_key('course', course_key.run) if ignore_case: course_query = location.to_deprecated_son('_id.') for key in course_query.iterkeys(): if isinstance(course_query[key], basestring): course_query[key] = re.compile(r"(?i)^{}$".format(course_query[key])) else: course_query = {'_id': location.to_deprecated_son()} course = self.collection.find_one(course_query, fields={'_id': True}) if course: return SlashSeparatedCourseKey(course['_id']['org'], course['_id']['course'], course['_id']['name']) else: return None def has_item(self, usage_key): """ Returns True if location exists in this ModuleStore. 
""" try: self._find_one(usage_key) return True except ItemNotFoundError: return False def get_item(self, usage_key, depth=0, using_descriptor_system=None, for_parent=None, **kwargs): """ Returns an XModuleDescriptor instance for the item at location. If any segment of the location is None except revision, raises xmodule.modulestore.exceptions.InsufficientSpecificationError If no object is found at that location, raises xmodule.modulestore.exceptions.ItemNotFoundError Arguments: usage_key: a :class:`.UsageKey` instance depth (int): An argument that some module stores may use to prefetch descendents of the queried modules for more efficient results later in the request. The depth is counted in the number of calls to get_children() to cache. None indicates to cache all descendents. using_descriptor_system (CachingDescriptorSystem): The existing CachingDescriptorSystem to add data to, and to load the XBlocks from. """ item = self._find_one(usage_key) module = self._load_items( usage_key.course_key, [item], depth, using_descriptor_system=using_descriptor_system, for_parent=for_parent, )[0] return module @staticmethod def _course_key_to_son(course_id, tag='i4x'): """ Generate the partial key to look up items relative to a given course """ return SON([ ('_id.tag', tag), ('_id.org', course_id.org), ('_id.course', course_id.course), ]) @staticmethod def _id_dict_to_son(id_dict): """ Generate the partial key to look up items relative to a given course """ return SON([ (key, id_dict[key]) for key in ('tag', 'org', 'course', 'category', 'name', 'revision') ]) @autoretry_read() def get_items( self, course_id, settings=None, content=None, key_revision=MongoRevisionKey.published, qualifiers=None, using_descriptor_system=None, **kwargs ): """ Returns: list of XModuleDescriptor instances for the matching items within the course with the given course_id NOTE: don't use this to look for courses as the course_id is required. Use get_courses which is a lot faster anyway. 
        If you don't provide a value for revision, this limits the result to only ones in the
        published course. Call this method on draft mongo store if you want to include drafts.

        Args:
            course_id (CourseKey): the course identifier
            settings (dict): fields to look for which have settings scope. Follows same syntax
                and rules as qualifiers below
            content (dict): fields to look for which have content scope. Follows same syntax and
                rules as qualifiers below.
            key_revision (str): the revision of the items you're looking for.
                MongoRevisionKey.draft - only returns drafts
                MongoRevisionKey.published (equates to None) - only returns published
                If you want one of each matching xblock but preferring draft to published, call this same method
                on the draft modulestore with ModuleStoreEnum.RevisionOption.draft_preferred.
            qualifiers (dict): what to look for within the course.
                Common qualifiers are ``category`` or any field name. if the target field is a list,
                then it searches for the given value in the list not list equivalence.
                Substring matching pass a regex object.
                For this modulestore, ``name`` is a commonly provided key (Location based stores)
                This modulestore does not allow searching dates by comparison or edited_by, previous_version,
                update_version info.
            using_descriptor_system (CachingDescriptorSystem): The existing CachingDescriptorSystem
                to add data to, and to load the XBlocks from.
        """
        qualifiers = qualifiers.copy() if qualifiers else {}  # copy the qualifiers (destructively manipulated here)
        query = self._course_key_to_son(course_id)
        query['_id.revision'] = key_revision
        # 'category' and 'name' are parts of the _id subdocument, not metadata
        for field in ['category', 'name']:
            if field in qualifiers:
                query['_id.' + field] = qualifiers.pop(field)

        for key, value in (settings or {}).iteritems():
            query['metadata.' + key] = value
        for key, value in (content or {}).iteritems():
            query['definition.data.' + key] = value
        if 'children' in qualifiers:
            query['definition.children'] = qualifiers.pop('children')

        query.update(qualifiers)
        items = self.collection.find(
            query,
            sort=[SORT_REVISION_FAVOR_DRAFT],
        )

        modules = self._load_items(course_id, list(items), using_descriptor_system=using_descriptor_system)
        return modules

    def create_course(self, org, course, run, user_id, fields=None, **kwargs):
        """
        Creates and returns the course.

        Args:
            org (str): the organization that owns the course
            course (str): the name of the course
            run (str): the name of the run
            user_id: id of the user creating the course
            fields (dict): Fields to set on the course at initialization
            kwargs: Any optional arguments understood by a subset of modulestores to customize instantiation

        Returns: a CourseDescriptor

        Raises:
            InvalidLocationError: If a course with the same org, course, and run already exists
        """
        course_id = SlashSeparatedCourseKey(org, course, run)

        # Check if a course with this org/course has been defined before (case-insensitive)
        course_search_location = SON([
            ('_id.tag', 'i4x'),
            ('_id.org', re.compile(u'^{}$'.format(course_id.org), re.IGNORECASE)),
            ('_id.course', re.compile(u'^{}$'.format(course_id.course), re.IGNORECASE)),
            ('_id.category', 'course'),
        ])
        # NOTE(review): ('_id') is a plain string, not a 1-tuple ('_id',) —
        # presumably pymongo tolerates it here; confirm before changing.
        courses = self.collection.find(course_search_location, fields=('_id'))
        if courses.count() > 0:
            raise DuplicateCourseError(course_id, courses[0]['_id'])

        with self.bulk_operations(course_id):
            xblock = self.create_item(user_id, course_id, 'course', course_id.run, fields=fields, **kwargs)

            # create any other necessary things as a side effect
            super(MongoModuleStore, self).create_course(
                org, course, run, user_id, runtime=xblock.runtime, **kwargs
            )

            return xblock

    def create_xblock(
            self, runtime, course_key, block_type, block_id=None, fields=None,
            metadata=None, definition_data=None, **kwargs
    ):
        """
        Create the new xblock but don't save it. Returns the new module.

        :param runtime: if you already have an xblock from the course, the xblock.runtime value
        :param fields: a dictionary of field names and values for the new xmodule
        """
        if metadata is None:
            metadata = {}

        if definition_data is None:
            definition_data = {}

        # @Cale, should this use LocalId like we do in split?
        if block_id is None:
            if block_type == 'course':
                block_id = course_key.run
            else:
                block_id = u'{}_{}'.format(block_type, uuid4().hex[:5])

        if runtime is None:
            services = {}
            if self.i18n_service:
                services["i18n"] = self.i18n_service

            if self.fs_service:
                services["fs"] = self.fs_service

            if self.user_service:
                services["user"] = self.user_service

            runtime = CachingDescriptorSystem(
                modulestore=self,
                module_data={},
                course_key=course_key,
                default_class=self.default_class,
                resources_fs=None,
                error_tracker=self.error_tracker,
                render_template=self.render_template,
                cached_metadata={},
                mixins=self.xblock_mixins,
                select=self.xblock_select,
                services=services,
            )
        xblock_class = runtime.load_block_type(block_type)
        location = course_key.make_usage_key(block_type, block_id)
        dbmodel = self._create_new_field_data(block_type, location, definition_data, metadata)
        xmodule = runtime.construct_xblock_from_class(
            xblock_class,
            # We're loading a descriptor, so student_id is meaningless
            # We also don't have separate notions of definition and usage ids yet,
            # so we use the location for both.
            ScopeIds(None, block_type, location, location),
            dbmodel,
            for_parent=kwargs.get('for_parent'),
        )
        if fields is not None:
            for key, value in fields.iteritems():
                setattr(xmodule, key, value)
        # decache any pending field settings from init
        xmodule.save()
        return xmodule

    def create_item(self, user_id, course_key, block_type, block_id=None, **kwargs):
        """
        Creates and saves a new item in a course.

        Returns the newly created item.
        Args:
            user_id: ID of the user creating and saving the xmodule
            course_key: A :class:`~opaque_keys.edx.CourseKey` identifying which course to create
                this item in
            block_type: The type of block to create
            block_id: a unique identifier for the new item. If not supplied,
                a new identifier will be generated
        """
        if block_id is None:
            if block_type == 'course':
                block_id = course_key.run
            else:
                block_id = u'{}_{}'.format(block_type, uuid4().hex[:5])

        runtime = kwargs.pop('runtime', None)
        xblock = self.create_xblock(runtime, course_key, block_type, block_id, **kwargs)
        # update_item with allow_not_found=True performs the initial save
        xblock = self.update_item(xblock, user_id, allow_not_found=True)

        return xblock

    def create_child(self, user_id, parent_usage_key, block_type, block_id=None, **kwargs):
        """
        Creates and saves a new xblock as a child of the specified block

        Returns the newly created item.

        Args:
            user_id: ID of the user creating and saving the xmodule
            parent_usage_key: a :class:`~opaque_key.edx.UsageKey` identifying the
                block that this item should be parented under
            block_type: The type of block to create
            block_id: a unique identifier for the new item. If not supplied,
                a new identifier will be generated
        """
        # attach to parent if given
        parent = None

        if parent_usage_key is not None:
            parent = self.get_item(parent_usage_key)
            kwargs.setdefault('for_parent', parent)

        xblock = self.create_item(user_id, parent_usage_key.course_key, block_type, block_id=block_id, **kwargs)

        if parent is not None and 'detached' not in xblock._class_tags:
            # Originally added to support entrance exams (settings.FEATURES.get('ENTRANCE_EXAMS'))
            if kwargs.get('position') is None:
                parent.children.append(xblock.location)
            else:
                parent.children.insert(kwargs.get('position'), xblock.location)

            self.update_item(parent, user_id, child_update=True)

        return xblock

    def import_xblock(self, user_id, course_key, block_type, block_id, fields=None, runtime=None, **kwargs):
        """
        Simple implementation of overwriting any existing xblock
        """
        if block_type == 'course':
            block_id = course_key.run
        xblock = self.create_xblock(runtime, course_key, block_type, block_id, fields)
        return self.update_item(xblock, user_id, allow_not_found=True)

    def _get_course_for_item(self, location, depth=0):
        '''
        for a given Xmodule, return the course that it belongs to
        Also we have to assert that this module maps to only one course item - it'll throw an
        assert if not
        '''
        return self.get_course(location.course_key, depth)

    def _update_single_item(self, location, update, allow_not_found=False):
        """
        Set update on the specified item, and raises ItemNotFoundError
        if the location doesn't exist
        """
        bulk_record = self._get_bulk_ops_record(location.course_key)
        bulk_record.dirty = True

        # See http://www.mongodb.org/display/DOCS/Updating for
        # atomic update syntax
        result = self.collection.update(
            {'_id': location.to_deprecated_son()},
            {'$set': update},
            multi=False,
            upsert=allow_not_found,
            w=1,  # wait until primary commits
        )
        if result['n'] == 0:
            raise ItemNotFoundError(location)

    def _update_ancestors(self, location, update):
        """
        Recursively applies update to all the ancestors of location
        """
        parent = self._get_raw_parent_location(as_published(location), ModuleStoreEnum.RevisionOption.draft_preferred)
        if parent:
            self._update_single_item(parent, update)
            self._update_ancestors(parent, update)

    def update_item(self, xblock, user_id, allow_not_found=False, force=False, isPublish=False,
                    is_publish_root=True):
        """
        Update the persisted version of xblock to reflect its current values.

        xblock: which xblock to persist
        user_id: who made the change (ignored for now by this modulestore)
        allow_not_found: whether to create a new object if one didn't already exist or give an error
        force: force is meaningless for this modulestore
        isPublish: an internal parameter that indicates whether this update is due to a Publish operation, and
          thus whether the item's published information should be updated.
        is_publish_root: when publishing, this indicates whether xblock is the root of the publish and should
          therefore propagate subtree edit info up the tree
        """
        course_key = xblock.location.course_key

        try:
            definition_data = self._serialize_scope(xblock, Scope.content)
            now = datetime.now(UTC)
            payload = {
                'definition.data': definition_data,
                'metadata': self._serialize_scope(xblock, Scope.settings),
                'edit_info': {
                    'edited_on': now,
                    'edited_by': user_id,
                    'subtree_edited_on': now,
                    'subtree_edited_by': user_id,
                }
            }

            if isPublish:
                payload['edit_info']['published_date'] = now
                payload['edit_info']['published_by'] = user_id
            elif 'published_date' in getattr(xblock, '_edit_info', {}):
                # carry forward prior publish info on a non-publish edit
                payload['edit_info']['published_date'] = xblock._edit_info['published_date']
                payload['edit_info']['published_by'] = xblock._edit_info['published_by']

            if xblock.has_children:
                children = self._serialize_scope(xblock, Scope.children)
                payload.update({'definition.children': children['children']})
                # Remove all old pointers to me, then add my current children back
                parent_cache = self._get_parent_cache(self.get_branch_setting())
                parent_cache.delete_by_value(xblock.location)
                for child in xblock.children:
parent_cache.set(unicode(child), xblock.location) self._update_single_item(xblock.scope_ids.usage_id, payload, allow_not_found=allow_not_found) # update subtree edited info for ancestors # don't update the subtree info for descendants of the publish root for efficiency if not isPublish or (isPublish and is_publish_root): ancestor_payload = { 'edit_info.subtree_edited_on': now, 'edit_info.subtree_edited_by': user_id } self._update_ancestors(xblock.scope_ids.usage_id, ancestor_payload) # update the edit info of the instantiated xblock xblock._edit_info = payload['edit_info'] # recompute (and update) the metadata inheritance tree which is cached self.refresh_cached_metadata_inheritance_tree(xblock.scope_ids.usage_id.course_key, xblock.runtime) # fire signal that we've written to DB except ItemNotFoundError: if not allow_not_found: raise elif not self.has_course(course_key): raise ItemNotFoundError(course_key) return xblock def _serialize_scope(self, xblock, scope): """ Find all fields of type reference and convert the payload from UsageKeys to deprecated strings :param xblock: the XBlock class :param jsonfields: a dict of the jsonified version of the fields """ jsonfields = {} for field_name, field in xblock.fields.iteritems(): if field.scope == scope and field.is_set_on(xblock): if field.scope == Scope.parent: continue elif isinstance(field, Reference): jsonfields[field_name] = unicode(field.read_from(xblock)) elif isinstance(field, ReferenceList): jsonfields[field_name] = [ unicode(ele) for ele in field.read_from(xblock) ] elif isinstance(field, ReferenceValueDict): jsonfields[field_name] = { key: unicode(subvalue) for key, subvalue in field.read_from(xblock).iteritems() } else: jsonfields[field_name] = field.read_json(xblock) return jsonfields def _get_non_orphan_parents(self, location, parents, revision): """ Extract non orphan parents by traversing the list of possible parents and remove current location from orphan parents to avoid parents calculation overhead 
next time. """ non_orphan_parents = [] # get bulk_record once rather than for each iteration bulk_record = self._get_bulk_ops_record(location.course_key) for parent in parents: parent_loc = Location._from_deprecated_son(parent['_id'], location.course_key.run) # travel up the tree for orphan validation ancestor_loc = parent_loc while ancestor_loc is not None: current_loc = ancestor_loc ancestor_loc = self._get_raw_parent_location(as_published(current_loc), revision) if ancestor_loc is None: bulk_record.dirty = True # The parent is an orphan, so remove all the children including # the location whose parent we are looking for from orphan parent self.collection.update( {'_id': parent_loc.to_deprecated_son()}, {'$set': {'definition.children': []}}, multi=False, upsert=True, ) elif ancestor_loc.category == 'course': # once we reach the top location of the tree and if the location is not an orphan then the # parent is not an orphan either non_orphan_parents.append(parent_loc) break return non_orphan_parents def _get_raw_parent_location(self, location, revision=ModuleStoreEnum.RevisionOption.published_only): ''' Helper for get_parent_location that finds the location that is the parent of this location in this course, but does NOT return a version agnostic location. 
''' assert location.revision is None assert revision == ModuleStoreEnum.RevisionOption.published_only \ or revision == ModuleStoreEnum.RevisionOption.draft_preferred parent_cache = self._get_parent_cache(self.get_branch_setting()) if parent_cache.has(unicode(location)): return parent_cache.get(unicode(location)) # create a query with tag, org, course, and the children field set to the given location query = self._course_key_to_son(location.course_key) query['definition.children'] = unicode(location) # if only looking for the PUBLISHED parent, set the revision in the query to None if revision == ModuleStoreEnum.RevisionOption.published_only: query['_id.revision'] = MongoRevisionKey.published def cache_and_return(parent_loc): # pylint:disable=missing-docstring parent_cache.set(unicode(location), parent_loc) return parent_loc # query the collection, sorting by DRAFT first parents = list( self.collection.find(query, {'_id': True}, sort=[SORT_REVISION_FAVOR_DRAFT]) ) if len(parents) == 0: # no parents were found return cache_and_return(None) if revision == ModuleStoreEnum.RevisionOption.published_only: if len(parents) > 1: non_orphan_parents = self._get_non_orphan_parents(location, parents, revision) if len(non_orphan_parents) == 0: # no actual parent found return cache_and_return(None) if len(non_orphan_parents) > 1: # should never have multiple PUBLISHED parents raise ReferentialIntegrityError( u"{} parents claim {}".format(len(parents), location) ) else: return cache_and_return(non_orphan_parents[0].replace(run=location.course_key.run)) else: # return the single PUBLISHED parent return cache_and_return(Location._from_deprecated_son(parents[0]['_id'], location.course_key.run)) else: # there could be 2 different parents if # (1) the draft item was moved or # (2) the parent itself has 2 versions: DRAFT and PUBLISHED # if there are multiple parents with version PUBLISHED then choose from non-orphan parents all_parents = [] published_parents = 0 for parent in parents: if 
parent['_id']['revision'] is None: published_parents += 1 all_parents.append(parent) # since we sorted by SORT_REVISION_FAVOR_DRAFT, the 0'th parent is the one we want if published_parents > 1: non_orphan_parents = self._get_non_orphan_parents(location, all_parents, revision) return cache_and_return(non_orphan_parents[0].replace(run=location.course_key.run)) found_id = all_parents[0]['_id'] # don't disclose revision outside modulestore return cache_and_return(Location._from_deprecated_son(found_id, location.course_key.run)) def get_parent_location(self, location, revision=ModuleStoreEnum.RevisionOption.published_only, **kwargs): ''' Find the location that is the parent of this location in this course. Returns: version agnostic location (revision always None) as per the rest of mongo. Args: revision: ModuleStoreEnum.RevisionOption.published_only - return only the PUBLISHED parent if it exists, else returns None ModuleStoreEnum.RevisionOption.draft_preferred - return either the DRAFT or PUBLISHED parent, preferring DRAFT, if parent(s) exists, else returns None ''' parent = self._get_raw_parent_location(location, revision) if parent: return parent return None def get_modulestore_type(self, course_key=None): """ Returns an enumeration-like type reflecting the type of this modulestore per ModuleStoreEnum.Type Args: course_key: just for signature compatibility """ return ModuleStoreEnum.Type.mongo def get_orphans(self, course_key, **kwargs): """ Return an array of all of the locations for orphans in the course. """ course_key = self.fill_in_run(course_key) detached_categories = [name for name, __ in XBlock.load_tagged_classes("detached")] query = self._course_key_to_son(course_key) query['_id.category'] = {'$nin': detached_categories} all_items = self.collection.find(query) all_reachable = set() item_locs = set() for item in all_items: if item['_id']['category'] != 'course': # It would be nice to change this method to return UsageKeys instead of the deprecated string. 
item_locs.add( unicode(as_published(Location._from_deprecated_son(item['_id'], course_key.run))) ) all_reachable = all_reachable.union(item.get('definition', {}).get('children', [])) item_locs -= all_reachable return [course_key.make_usage_key_from_deprecated_string(item_loc) for item_loc in item_locs] def get_courses_for_wiki(self, wiki_slug, **kwargs): """ Return the list of courses which use this wiki_slug :param wiki_slug: the course wiki root slug :return: list of course keys """ courses = self.collection.find( {'_id.category': 'course', 'definition.data.wiki_slug': wiki_slug}, {'_id': True} ) # the course's run == its name. It's the only xblock for which that's necessarily true. return [ Location._from_deprecated_son(course['_id'], course['_id']['name']).course_key for course in courses ] def _create_new_field_data(self, _category, _location, definition_data, metadata): """ To instantiate a new xmodule which will be saved later, set up the dbModel and kvs """ kvs = MongoKeyValueStore( definition_data, None, [], metadata, ) field_data = KvsFieldData(kvs) return field_data def _find_course_assets(self, course_key): """ Internal; finds (or creates) course asset info about all assets for a particular course Arguments: course_key (CourseKey): course identifier Returns: CourseAssetsFromStorage object, wrapping the relevant Mongo doc. If asset metadata exists, other keys will be the other asset types with values as lists of asset metadata. """ # Using the course_key, find or insert the course asset metadata document. # A single document exists per course to store the course asset metadata. course_key = self.fill_in_run(course_key) if course_key.run is None: log.warning(u'No run found for combo org "{}" course "{}" on asset request.'.format( course_key.org, course_key.course )) course_assets = None else: # Complete course key, so query for asset metadata. 
course_assets = self.asset_collection.find_one( {'course_id': unicode(course_key)}, ) doc_id = None if course_assets is None else course_assets['_id'] if course_assets is None: # Check to see if the course is created in the course collection. if self.get_course(course_key) is None: raise ItemNotFoundError(course_key) else: # Course exists, so create matching assets document. course_assets = {'course_id': unicode(course_key), 'assets': {}} doc_id = self.asset_collection.insert(course_assets) elif isinstance(course_assets['assets'], list): # This record is in the old course assets format. # Ensure that no data exists before updating the format. assert len(course_assets['assets']) == 0 # Update the format to a dict. self.asset_collection.update( {'_id': doc_id}, {'$set': {'assets': {}}} ) # Pass back wrapped 'assets' dict with the '_id' key added to it for document update purposes. return CourseAssetsFromStorage(course_key, doc_id, course_assets['assets']) def _make_mongo_asset_key(self, asset_type): """ Given a asset type, form a key needed to update the proper embedded field in the Mongo doc. """ return 'assets.{}'.format(asset_type) @contract(asset_metadata_list='list(AssetMetadata)', user_id='int|long') def _save_asset_metadata_list(self, asset_metadata_list, user_id, import_only): """ Internal; saves the info for a particular course's asset. Arguments: asset_metadata_list (list(AssetMetadata)): list of data about several course assets user_id (int|long): user ID saving the asset metadata import_only (bool): True if edited_on/by data should remain unchanged. """ course_key = asset_metadata_list[0].asset_id.course_key course_assets = self._find_course_assets(course_key) assets_by_type = self._save_assets_by_type(course_key, asset_metadata_list, course_assets, user_id, import_only) # Build an update set with potentially multiple embedded fields. 
updates_by_type = {} for asset_type, assets in assets_by_type.iteritems(): updates_by_type[self._make_mongo_asset_key(asset_type)] = assets.as_list() # Update the document. self.asset_collection.update( {'_id': course_assets.doc_id}, {'$set': updates_by_type} ) return True @contract(asset_metadata='AssetMetadata', user_id='int|long') def save_asset_metadata(self, asset_metadata, user_id, import_only=False): """ Saves the info for a particular course's asset. Arguments: asset_metadata (AssetMetadata): data about the course asset data user_id (int|long): user ID saving the asset metadata import_only (bool): True if importing without editing, False if editing Returns: True if info save was successful, else False """ return self._save_asset_metadata_list([asset_metadata, ], user_id, import_only) @contract(asset_metadata_list='list(AssetMetadata)', user_id='int|long') def save_asset_metadata_list(self, asset_metadata_list, user_id, import_only=False): """ Saves the asset metadata for each asset in a list of asset metadata. Optimizes the saving of many assets. Args: asset_metadata (AssetMetadata): data about the course asset data user_id (int|long): user ID saving the asset metadata import_only (bool): True if importing without editing, False if editing Returns: True if info save was successful, else False """ return self._save_asset_metadata_list(asset_metadata_list, user_id, import_only) @contract(source_course_key='CourseKey', dest_course_key='CourseKey', user_id='int|long') def copy_all_asset_metadata(self, source_course_key, dest_course_key, user_id): """ Copy all the course assets from source_course_key to dest_course_key. If dest_course already has assets, this removes the previous value. It doesn't combine the assets in dest. 
Arguments: source_course_key (CourseKey): identifier of course to copy from dest_course_key (CourseKey): identifier of course to copy to """ source_assets = self._find_course_assets(source_course_key) dest_assets = {'assets': source_assets.asset_md.copy(), 'course_id': unicode(dest_course_key)} self.asset_collection.remove({'course_id': unicode(dest_course_key)}) # Update the document. self.asset_collection.insert(dest_assets) @contract(asset_key='AssetKey', attr_dict=dict, user_id='int|long') def set_asset_metadata_attrs(self, asset_key, attr_dict, user_id): """ Add/set the given dict of attrs on the asset at the given location. Value can be any type which pymongo accepts. Arguments: asset_key (AssetKey): asset identifier attr_dict (dict): attribute: value pairs to set Raises: ItemNotFoundError if no such item exists AttributeError is attr is one of the build in attrs. """ course_assets, asset_idx = self._find_course_asset(asset_key) if asset_idx is None: raise ItemNotFoundError(asset_key) # Form an AssetMetadata. all_assets = course_assets[asset_key.asset_type] md = AssetMetadata(asset_key, asset_key.path) md.from_storable(all_assets[asset_idx]) md.update(attr_dict) # Generate a Mongo doc from the metadata and update the course asset info. all_assets[asset_idx] = md.to_storable() self.asset_collection.update( {'_id': course_assets.doc_id}, {"$set": {self._make_mongo_asset_key(asset_key.asset_type): all_assets}} ) @contract(asset_key='AssetKey', user_id='int|long') def delete_asset_metadata(self, asset_key, user_id): """ Internal; deletes a single asset's metadata. Arguments: asset_key (AssetKey): key containing original asset filename Returns: Number of asset metadata entries deleted (0 or 1) """ course_assets, asset_idx = self._find_course_asset(asset_key) if asset_idx is None: return 0 all_asset_info = course_assets[asset_key.asset_type] all_asset_info.pop(asset_idx) # Update the document. 
self.asset_collection.update( {'_id': course_assets.doc_id}, {'$set': {self._make_mongo_asset_key(asset_key.asset_type): all_asset_info}} ) return 1 # pylint: disable=unused-argument @contract(course_key='CourseKey', user_id='int|long') def delete_all_asset_metadata(self, course_key, user_id): """ Delete all of the assets which use this course_key as an identifier. Arguments: course_key (CourseKey): course_identifier """ # Using the course_id, find the course asset metadata document. # A single document exists per course to store the course asset metadata. try: course_assets = self._find_course_assets(course_key) self.asset_collection.remove(course_assets.doc_id) except ItemNotFoundError: # When deleting asset metadata, if a course's asset metadata is not present, no big deal. pass def heartbeat(self): """ Check that the db is reachable. """ if self.database.connection.alive(): return {ModuleStoreEnum.Type.mongo: True} else: raise HeartbeatFailure("Can't connect to {}".format(self.database.name), 'mongo') def ensure_indexes(self): """ Ensure that all appropriate indexes are created that are needed by this modulestore, or raise an exception if unable to. This method is intended for use by tests and administrative commands, and not to be run during server startup. 
""" # Because we often query for some subset of the id, we define this index: self.collection.create_index( [ ('_id.tag', pymongo.ASCENDING), ('_id.org', pymongo.ASCENDING), ('_id.course', pymongo.ASCENDING), ('_id.category', pymongo.ASCENDING), ('_id.name', pymongo.ASCENDING), ('_id.revision', pymongo.ASCENDING), ], background=True) # Because we often scan for all category='course' regardless of the value of the other fields: self.collection.create_index('_id.category', background=True) # Because lms calls get_parent_locations frequently (for path generation): self.collection.create_index('definition.children', sparse=True, background=True) # To allow prioritizing draft vs published material self.collection.create_index('_id.revision', background=True) # Some overrides that still need to be implemented by subclasses def convert_to_draft(self, location, user_id): raise NotImplementedError() def delete_item(self, location, user_id, **kwargs): raise NotImplementedError() def has_changes(self, xblock): raise NotImplementedError() def has_published_version(self, xblock): raise NotImplementedError() def publish(self, location, user_id): raise NotImplementedError() def revert_to_published(self, location, user_id): raise NotImplementedError() def unpublish(self, location, user_id): raise NotImplementedError()
penelopy/luigi
refs/heads/master
test/contrib/bigquery_test.py
7
# -*- coding: utf-8 -*- # # Copyright 2015 Twitter Inc # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """This is an integration test for the Bigquery-luigi binding. This test requires credentials that can access GCS & access to a bucket below. Follow the directions in the gcloud tools to set up local credentials. """ import json import os import luigi from luigi.contrib import bigquery from luigi.contrib import gcs from contrib import gcs_test from nose.plugins.attrib import attr PROJECT_ID = gcs_test.PROJECT_ID DATASET_ID = os.environ.get('BQ_TEST_DATASET_ID', 'luigi_tests') @attr('gcloud') class TestLoadTask(bigquery.BigqueryLoadTask): _BIGQUERY_CLIENT = None source = luigi.Parameter() table = luigi.Parameter() @property def schema(self): return [ {'mode': 'NULLABLE', 'name': 'field1', 'type': 'STRING'}, {'mode': 'NULLABLE', 'name': 'field2', 'type': 'INTEGER'}, ] def source_uris(self): return [self.source] def output(self): return bigquery.BigqueryTarget(PROJECT_ID, DATASET_ID, self.table, client=self._BIGQUERY_CLIENT) @attr('gcloud') class TestRunQueryTask(bigquery.BigqueryRunQueryTask): _BIGQUERY_CLIENT = None query = ''' SELECT 'hello' as field1, 2 as field2 ''' table = luigi.Parameter() def output(self): return bigquery.BigqueryTarget(PROJECT_ID, DATASET_ID, self.table, client=self._BIGQUERY_CLIENT) @attr('gcloud') class BigqueryTest(gcs_test._GCSBaseTestCase): def setUp(self): super(BigqueryTest, self).setUp() self.bq_client = 
bigquery.BigqueryClient(gcs_test.CREDENTIALS) self.table = bigquery.BQTable(project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=self.id().split('.')[-1]) self.addCleanup(self.bq_client.delete_table, self.table) def create_dataset(self, data=[]): self.bq_client.delete_table(self.table) text = '\n'.join(map(json.dumps, data)) gcs_file = gcs_test.bucket_url(self.id()) self.client.put_string(text, gcs_file) task = TestLoadTask(source=gcs_file, table=self.table.table_id) task._BIGQUERY_CLIENT = self.bq_client task.run() def test_table_uri(self): intended_uri = "bq://" + PROJECT_ID + "/" + \ DATASET_ID + "/" + self.table.table_id self.assertTrue(self.table.uri == intended_uri) def test_load_and_copy(self): self.create_dataset([ {'field1': 'hi', 'field2': 1}, {'field1': 'bye', 'field2': 2}, ]) # Cram some stuff in here to make the tests run faster - loading data takes a while! self.assertTrue(self.bq_client.dataset_exists(self.table)) self.assertTrue(self.bq_client.table_exists(self.table)) self.assertIn(self.table.dataset_id, list(self.bq_client.list_datasets(self.table.project_id))) self.assertIn(self.table.table_id, list(self.bq_client.list_tables(self.table.dataset))) new_table = self.table._replace(table_id=self.table.table_id + '_copy') self.bq_client.copy( source_table=self.table, dest_table=new_table ) self.assertTrue(self.bq_client.table_exists(new_table)) self.bq_client.delete_table(new_table) self.assertFalse(self.bq_client.table_exists(new_table)) def test_run_query(self): task = TestRunQueryTask(table=self.table.table_id) task._BIGQUERY_CLIENT = self.bq_client task.run() self.assertTrue(self.bq_client.table_exists(self.table))
bop/hybrid
refs/heads/master
lib/python2.6/site-packages/django/contrib/formtools/tests/wizard/namedwizardtests/tests.py
90
from __future__ import unicode_literals from django.core.urlresolvers import reverse from django.http import QueryDict from django.test import TestCase from django.contrib.auth.models import User from django.contrib.auth.tests.utils import skipIfCustomUser from django.contrib.formtools.wizard.views import (NamedUrlSessionWizardView, NamedUrlCookieWizardView) from django.contrib.formtools.tests.wizard.forms import get_request, Step1, Step2 class NamedWizardTests(object): urls = 'django.contrib.formtools.tests.wizard.namedwizardtests.urls' def setUp(self): self.testuser, created = User.objects.get_or_create(username='testuser1') self.wizard_step_data[0]['form1-user'] = self.testuser.pk def test_initial_call(self): response = self.client.get(reverse('%s_start' % self.wizard_urlname)) self.assertEqual(response.status_code, 302) response = self.client.get(response['Location']) self.assertEqual(response.status_code, 200) wizard = response.context['wizard'] self.assertEqual(wizard['steps'].current, 'form1') self.assertEqual(wizard['steps'].step0, 0) self.assertEqual(wizard['steps'].step1, 1) self.assertEqual(wizard['steps'].last, 'form4') self.assertEqual(wizard['steps'].prev, None) self.assertEqual(wizard['steps'].next, 'form2') self.assertEqual(wizard['steps'].count, 4) self.assertEqual(wizard['url_name'], self.wizard_urlname) def test_initial_call_with_params(self): get_params = {'getvar1': 'getval1', 'getvar2': 'getval2'} response = self.client.get(reverse('%s_start' % self.wizard_urlname), get_params) self.assertEqual(response.status_code, 302) # Test for proper redirect GET parameters location = response['Location'] self.assertNotEqual(location.find('?'), -1) querydict = QueryDict(location[location.find('?') + 1:]) self.assertEqual(dict(querydict.items()), get_params) def test_form_post_error(self): response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': 'form1'}), self.wizard_step_1_data) self.assertEqual(response.status_code, 200) 
self.assertEqual(response.context['wizard']['steps'].current, 'form1') self.assertEqual(response.context['wizard']['form'].errors, {'name': ['This field is required.'], 'user': ['This field is required.']}) def test_form_post_success(self): response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': 'form1'}), self.wizard_step_data[0]) response = self.client.get(response['Location']) self.assertEqual(response.status_code, 200) wizard = response.context['wizard'] self.assertEqual(wizard['steps'].current, 'form2') self.assertEqual(wizard['steps'].step0, 1) self.assertEqual(wizard['steps'].prev, 'form1') self.assertEqual(wizard['steps'].next, 'form3') def test_form_stepback(self): response = self.client.get( reverse(self.wizard_urlname, kwargs={'step': 'form1'})) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form1') response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': 'form1'}), self.wizard_step_data[0]) response = self.client.get(response['Location']) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form2') response = self.client.post( reverse(self.wizard_urlname, kwargs={ 'step': response.context['wizard']['steps'].current }), {'wizard_goto_step': response.context['wizard']['steps'].prev}) response = self.client.get(response['Location']) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form1') def test_form_jump(self): response = self.client.get( reverse(self.wizard_urlname, kwargs={'step': 'form1'})) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form1') response = self.client.get( reverse(self.wizard_urlname, kwargs={'step': 'form3'})) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form3') def test_form_finish(self): response = 
self.client.get( reverse(self.wizard_urlname, kwargs={'step': 'form1'})) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form1') response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': response.context['wizard']['steps'].current}), self.wizard_step_data[0]) response = self.client.get(response['Location']) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form2') post_data = self.wizard_step_data[1] post_data['form2-file1'].close() post_data['form2-file1'] = open(__file__, 'rb') response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': response.context['wizard']['steps'].current}), post_data) response = self.client.get(response['Location']) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form3') response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': response.context['wizard']['steps'].current}), self.wizard_step_data[2]) response = self.client.get(response['Location']) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form4') response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': response.context['wizard']['steps'].current}), self.wizard_step_data[3]) response = self.client.get(response['Location']) self.assertEqual(response.status_code, 200) all_data = response.context['form_list'] with open(__file__, 'rb') as f: self.assertEqual(all_data[1]['file1'].read(), f.read()) all_data[1]['file1'].close() del all_data[1]['file1'] self.assertEqual(all_data, [ {'name': 'Pony', 'thirsty': True, 'user': self.testuser}, {'address1': '123 Main St', 'address2': 'Djangoland'}, {'random_crap': 'blah blah'}, [{'random_crap': 'blah blah'}, {'random_crap': 'blah blah'}]]) def test_cleaned_data(self): response = self.client.get( reverse(self.wizard_urlname, kwargs={'step': 'form1'})) 
self.assertEqual(response.status_code, 200) response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': response.context['wizard']['steps'].current}), self.wizard_step_data[0]) response = self.client.get(response['Location']) self.assertEqual(response.status_code, 200) post_data = self.wizard_step_data[1] post_data['form2-file1'] = open(__file__, 'rb') response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': response.context['wizard']['steps'].current}), post_data) response = self.client.get(response['Location']) self.assertEqual(response.status_code, 200) step2_url = reverse(self.wizard_urlname, kwargs={'step': 'form2'}) response = self.client.get(step2_url) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form2') with open(__file__, 'rb') as f: self.assertEqual( response.context['wizard']['form'].files['form2-file1'].read(), f.read()) response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': response.context['wizard']['steps'].current}), self.wizard_step_data[2]) response = self.client.get(response['Location']) self.assertEqual(response.status_code, 200) response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': response.context['wizard']['steps'].current}), self.wizard_step_data[3]) response = self.client.get(response['Location']) self.assertEqual(response.status_code, 200) all_data = response.context['all_cleaned_data'] with open(__file__, 'rb') as f: self.assertEqual(all_data['file1'].read(), f.read()) all_data['file1'].close() del all_data['file1'] self.assertEqual( all_data, {'name': 'Pony', 'thirsty': True, 'user': self.testuser, 'address1': '123 Main St', 'address2': 'Djangoland', 'random_crap': 'blah blah', 'formset-form4': [ {'random_crap': 'blah blah'}, {'random_crap': 'blah blah'} ]}) def test_manipulated_data(self): response = self.client.get( reverse(self.wizard_urlname, kwargs={'step': 'form1'})) 
self.assertEqual(response.status_code, 200) response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': response.context['wizard']['steps'].current}), self.wizard_step_data[0]) response = self.client.get(response['Location']) self.assertEqual(response.status_code, 200) post_data = self.wizard_step_data[1] post_data['form2-file1'].close() post_data['form2-file1'] = open(__file__, 'rb') response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': response.context['wizard']['steps'].current}), post_data) response = self.client.get(response['Location']) self.assertEqual(response.status_code, 200) response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': response.context['wizard']['steps'].current}), self.wizard_step_data[2]) loc = response['Location'] response = self.client.get(loc) self.assertEqual(response.status_code, 200, loc) self.client.cookies.pop('sessionid', None) self.client.cookies.pop('wizard_cookie_contact_wizard', None) response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': response.context['wizard']['steps'].current}), self.wizard_step_data[3]) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form1') def test_form_reset(self): response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': 'form1'}), self.wizard_step_data[0]) response = self.client.get(response['Location']) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form2') response = self.client.get( '%s?reset=1' % reverse('%s_start' % self.wizard_urlname)) self.assertEqual(response.status_code, 302) response = self.client.get(response['Location']) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form1') @skipIfCustomUser class NamedSessionWizardTests(NamedWizardTests, TestCase): wizard_urlname = 'nwiz_session' wizard_step_1_data = { 
'session_contact_wizard-current_step': 'form1', } wizard_step_data = ( { 'form1-name': 'Pony', 'form1-thirsty': '2', 'session_contact_wizard-current_step': 'form1', }, { 'form2-address1': '123 Main St', 'form2-address2': 'Djangoland', 'session_contact_wizard-current_step': 'form2', }, { 'form3-random_crap': 'blah blah', 'session_contact_wizard-current_step': 'form3', }, { 'form4-INITIAL_FORMS': '0', 'form4-TOTAL_FORMS': '2', 'form4-MAX_NUM_FORMS': '0', 'form4-0-random_crap': 'blah blah', 'form4-1-random_crap': 'blah blah', 'session_contact_wizard-current_step': 'form4', } ) @skipIfCustomUser class NamedCookieWizardTests(NamedWizardTests, TestCase): wizard_urlname = 'nwiz_cookie' wizard_step_1_data = { 'cookie_contact_wizard-current_step': 'form1', } wizard_step_data = ( { 'form1-name': 'Pony', 'form1-thirsty': '2', 'cookie_contact_wizard-current_step': 'form1', }, { 'form2-address1': '123 Main St', 'form2-address2': 'Djangoland', 'cookie_contact_wizard-current_step': 'form2', }, { 'form3-random_crap': 'blah blah', 'cookie_contact_wizard-current_step': 'form3', }, { 'form4-INITIAL_FORMS': '0', 'form4-TOTAL_FORMS': '2', 'form4-MAX_NUM_FORMS': '0', 'form4-0-random_crap': 'blah blah', 'form4-1-random_crap': 'blah blah', 'cookie_contact_wizard-current_step': 'form4', } ) class NamedFormTests(object): urls = 'django.contrib.formtools.tests.wizard.namedwizardtests.urls' def test_revalidation(self): request = get_request() testform = self.formwizard_class.as_view( [('start', Step1), ('step2', Step2)], url_name=self.wizard_urlname) response, instance = testform(request, step='done') instance.render_done(None) self.assertEqual(instance.storage.current_step, 'start') class TestNamedUrlSessionWizardView(NamedUrlSessionWizardView): def dispatch(self, request, *args, **kwargs): response = super(TestNamedUrlSessionWizardView, self).dispatch(request, *args, **kwargs) return response, self class TestNamedUrlCookieWizardView(NamedUrlCookieWizardView): def dispatch(self, request, 
*args, **kwargs): response = super(TestNamedUrlCookieWizardView, self).dispatch(request, *args, **kwargs) return response, self @skipIfCustomUser class NamedSessionFormTests(NamedFormTests, TestCase): formwizard_class = TestNamedUrlSessionWizardView wizard_urlname = 'nwiz_session' @skipIfCustomUser class NamedCookieFormTests(NamedFormTests, TestCase): formwizard_class = TestNamedUrlCookieWizardView wizard_urlname = 'nwiz_cookie'
huran2014/huran.github.io
refs/heads/master
wot_gateway/gateway/bin/gateway/register.py
1
#-*-coding:utf-8-*-
# Device registration / removal service for the WoT gateway.
# Listens on a TCP socket for registration messages from devices, maintains
# MAC -> device-id and MAC -> {resource-port: platform-resource-id} mappings,
# and persists them as tab-separated lines under ./cfg/.
import os
import sys
import socket
import json
import select
import threading
import logging
import logging.handlers
import shutil
from gateway import *


def dev_write_to_cfg(mac,dev):
    # Append one "<mac>\t<dev-id>" line to the MAC -> device map file.
    # NOTE(review): a with-block would guarantee the close on error.
    f=open("./cfg/mac_dev_map.cfg",'a')
    f.write(mac+"\t"+dev)
    f.write('\n')
    f.close()


def mac_resID_resPlat_write_to_cfg(mac,resID,resPlat):
    # Append one "<mac>\t<res-port>\t<platform-res-id>" line to the resource map file.
    f=open("./cfg/mac_resID_resPlat_map.cfg",'a')
    f.write(mac+"\t"+str(resID)+"\t"+resPlat)
    f.write('\n')
    f.close()


def dev_del(mac):
    # Remove every line belonging to *mac* from both map files by rewriting
    # each file to a .tmp sibling and moving it back over the original.
    with open('./cfg/mac_dev_map.cfg', 'r') as f:
        with open('./cfg/mac_dev_map.cfg.tmp', 'w') as g:
            for rLine in f.readlines():
                if rLine!='':
                    lines=rLine.split("\t")
                    if len(lines)==2:
                        # Keep only entries for other MACs.
                        if lines[0] != mac:
                            g.write(rLine)
    shutil.move('./cfg/mac_dev_map.cfg.tmp', './cfg/mac_dev_map.cfg')
    with open('./cfg/mac_resID_resPlat_map.cfg', 'r') as f:
        with open('./cfg/mac_resID_resPlat_map.cfg.tmp', 'w') as g:
            for rLine in f.readlines():
                if rLine!='':
                    lines=rLine.split("\t")
                    if len(lines)==3:
                        if lines[0] != mac:
                            g.write(rLine)
    shutil.move('./cfg/mac_resID_resPlat_map.cfg.tmp', './cfg/mac_resID_resPlat_map.cfg')


# Rotating log under log/register.log, 1 MiB per file, 5 backups.
handler = logging.handlers.RotatingFileHandler('log/register.log', maxBytes = 1024*1024, backupCount = 5) # instantiate the handler
fmt = '%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(message)s'
formatter = logging.Formatter(fmt) # instantiate the formatter
handler.setFormatter(formatter) # attach the formatter to the handler
logger = logging.getLogger('register') # get the logger named 'register'
logger.addHandler(handler) # attach the handler to the logger
logger.setLevel(logging.DEBUG)


class Register_Del(threading.Thread):
    """Thread that accepts device registration/exit messages over TCP."""

    # Class-level (shared) state: MAC -> gateway device id,
    # and MAC -> {resource port (str) -> platform resource id}.
    # NOTE(review): shared across all instances by design, it appears --
    # confirm only one Register_Del thread is ever created.
    mac_dev_map = {}
    mac_resID_resPlat_map = {}

    def __init__(self,num,hostname,port):
        threading.Thread.__init__(self)
        self.thread_num = num
        self.thread_stop = False
        self.port = port
        self.hostname = hostname
        self.read_config()

    def read_config(self):
        # Rebuild the in-memory maps from the persisted cfg files.
        # Mode 'a+' also creates the file on first run; NOTE(review): the
        # read position of 'a+' at open is platform-dependent in Python 2.
        f_mac_dev=open("./cfg/mac_dev_map.cfg",'a+')
        for rLine in f_mac_dev:
            if rLine!= '':
                lines=rLine.strip().split('\t')
                if len(lines)==2:
                    Register_Del.mac_dev_map[lines[0]]=lines[1]
                else:
                    pass
        f_mac_dev.close()
        f_resLocal_resPlat=open("./cfg/mac_resID_resPlat_map.cfg",'a+')
        for rLine in f_resLocal_resPlat:
            if rLine!='':
                lines=rLine.strip().split('\t')
                if len(lines)==3:
                    if lines[0] not in Register_Del.mac_resID_resPlat_map:
                        Register_Del.mac_resID_resPlat_map[lines[0]]={}
                        Register_Del.mac_resID_resPlat_map[lines[0]][lines[1]]=lines[2]
                    else:
                        Register_Del.mac_resID_resPlat_map[lines[0]][lines[1]]=lines[2]
                else:
                    pass
        f_resLocal_resPlat.close()

    def run(self):
        # Non-blocking accept loop driven by select(); runs until stop().
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server.setblocking(False)
        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
        server.bind((self.hostname,self.port))
        server.listen(5)
        print 'listening register socket on %s:%d' %(self.hostname,self.port)
        inputs = [server]
        outputs = []
        message_queues = {}
        timeout = 20  # seconds per select() wait
        while not self.thread_stop:
            while inputs:
                readable , writable , exceptional = select.select(inputs, outputs, inputs, timeout)
                #When timeout reached , select return three empty lists
                if not (readable or writable or exceptional):
                    logger.error("Time out ! ")
                    break
                for s in readable:
                    if s is server:
                        #A "readable" socket is ready to accept a connection
                        client_sock, client_address = s.accept()
                        print '[RegisterThread] connection from',client_address
                        # select generally matches with non-block socket
                        client_sock.setblocking(0)
                        inputs.append(client_sock)
                    else:
                        buf = s.recv(1024)
                        if buf:
                            try:
                                # SECURITY(review): eval() on raw network data
                                # executes arbitrary Python expressions sent by
                                # the peer. json.loads(buf) would be the safe
                                # form if senders emit real JSON -- verify the
                                # device-side message format before changing.
                                json_buf=json.dumps(eval(buf))
                                result=json.loads(json_buf)
                                print 'analyis is succeed'
                                if result['flags']==0:
                                    # flags==0: registration message.
                                    if result['Mac_address'] not in Register_Del.mac_dev_map:
                                        # New device: allocate a device id, then one
                                        # resource id per declared resource.
                                        dev_id=WrtGateway.add_dev()
                                        Register_Del.mac_resID_resPlat_map[result["Mac_address"]]={}
                                        Register_Del.mac_dev_map[result['Mac_address']]=dev_id
                                        dev_write_to_cfg(result['Mac_address'],dev_id)
                                        res_num=result['Res_num']
                                        for i in range(res_num):
                                            # if add device, then res must be added !!!
                                            #if result['Res'][i]['Res_port'] not in Register_Del.resLocal_resPlat_map:
                                            #res_id=WrtGateway.add_res(dev_id)
                                            res_type=result['Res'][i]['Res_type']
                                            res_id=WrtGateway.add_res(dev_id,res_type)
                                            Register_Del.mac_resID_resPlat_map[result["Mac_address"]][str(result['Res'][i]['Res_port'])]=res_id
                                            mac_resID_resPlat_write_to_cfg(result["Mac_address"],result['Res'][i]['Res_port'],res_id)
                                    else:
                                        # Known device: only add resources not seen before.
                                        res_num=result['Res_num']
                                        dev_id=Register_Del.mac_dev_map[result['Mac_address']]
                                        for i in range(res_num):
                                            if str(result['Res'][i]['Res_port']) not in Register_Del.mac_resID_resPlat_map[result["Mac_address"]]:
                                                #res_id=WrtGateway.add_res(dev_id)
                                                res_type=result['Res'][i]['Res_type']
                                                res_id=WrtGateway.add_res(dev_id,res_type)
                                                Register_Del.mac_resID_resPlat_map[result["Mac_address"]][str(result['Res'][i]['Res_port'])]=res_id
                                                mac_resID_resPlat_write_to_cfg(result["Mac_address"],result['Res'][i]['Res_port'],res_id)
                                            else:
                                                pass
                                else:
                                    # Non-zero flags: device exit -- tear everything down.
                                    dev_id=Register_Del.mac_dev_map[result['Mac_address']]
                                    print 'device ' + dev_id + ' exited'
                                    WrtGateway.del_dev(dev_id)
                                    dev_del(result["Mac_address"])
                                    del Register_Del.mac_dev_map[result["Mac_address"]]
                                    del Register_Del.mac_resID_resPlat_map[result["Mac_address"]]
                                print '[RegisterThread] Res map:',Register_Del.mac_resID_resPlat_map
                            except:
                                # NOTE(review): bare except hides KeyError/ValueError
                                # from malformed messages as well as parse errors.
                                print 'json analysis error,the buf is ',buf
                        else:
                            # Empty recv(): peer closed the connection.
                            inputs.remove(s)
                            s.close()

    def stop(self):
        # Request loop termination; takes effect on the next outer iteration.
        self.thread_stop = True
swyytch/supernova
refs/heads/master
setup.py
1
#!/usr/bin/python
#
# Copyright 2014 Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from setuptools import setup

# Runtime dependencies installed alongside supernova.
INSTALL_REQUIRES = [
    "click",
    "configobj",
    "keyring",
    "python-novaclient",
    "six",
]

# Package metadata for supernova, a wrapper that drives novaclient against
# multiple configured nova environments.
setup(
    name='supernova',
    version='2.0.8',
    author='Major Hayden',
    author_email='major@mhtx.net',
    url='https://github.com/major/supernova',
    description="novaclient wrapper for multiple nova environments",
    packages=['supernova'],
    install_requires=INSTALL_REQUIRES,
    # Console scripts: the main wrapper plus the keyring management helper.
    entry_points='''
    [console_scripts]
    supernova = supernova.executable:run_supernova
    supernova-keyring = supernova.executable:run_supernova_keyring
    ''',
)
DarthMaulware/EquationGroupLeaks
refs/heads/master
Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/DataHandlers/Mcl_Cmd_Delete_DataHandler.py
1
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: Mcl_Cmd_Delete_DataHandler.py
# NOTE(review): decompiler output (Python 2). Names used below without a
# local definition (Result, errorStrings) are presumably injected into
# globals() by ImportNamesWithNamespace -- verify against the
# 'mca.file.cmd.delete' namespace before assuming a NameError bug.


def DataHandlerMain(namespace, InputFilename, OutputFilename):
    # Render the marshalled results of a file 'delete' command as XML output.
    # Returns True on normal completion, False when a stop was requested.
    import mcl.imports
    import mcl.data.Input
    import mcl.data.Output
    import mcl.status
    import mcl.target
    import mcl.object.Message
    # Pulls command-specific names (Result, errorStrings, ...) into this
    # module's globals -- TODO confirm exact set against mca.file.cmd.delete.
    mcl.imports.ImportNamesWithNamespace(namespace, 'mca.file.cmd.delete', globals())
    input = mcl.data.Input.GetInput(InputFilename)  # NOTE(review): shadows the builtin 'input'
    output = mcl.data.Output.StartOutput(OutputFilename, input)
    output.Start('Delete', 'delete', [])
    msg = mcl.object.Message.DemarshalMessage(input.GetData())
    if input.GetStatus() != mcl.status.MCL_SUCCESS:
        # The command itself failed: record module/OS error codes and stop.
        errorMsg = msg.FindMessage(mcl.object.Message.MSG_KEY_RESULT_ERROR)
        moduleError = errorMsg.FindU32(mcl.object.Message.MSG_KEY_RESULT_ERROR_MODULE)
        osError = errorMsg.FindU32(mcl.object.Message.MSG_KEY_RESULT_ERROR_OS)
        output.RecordModuleError(moduleError, osError, errorStrings)
        output.EndWithStatus(input.GetStatus())
        return True
    from mcl.object.XmlOutput import XmlOutput
    xml = XmlOutput()
    xml.Start('Deletions')
    encounteredErrors = False
    # One Result record per attempted file deletion.
    while msg.GetNumRetrieved() < msg.GetCount():
        if mcl.CheckForStop():
            # External stop request: abort and report failure.
            output.EndWithStatus(mcl.target.CALL_FAILED)
            return False
        result = Result()
        result.Demarshal(msg)
        if result.deleteStatus != 0:
            encounteredErrors = True
        sub = xml.AddSubElement('FileDelete')
        sub.AddAttribute('file', result.filename)
        sub.AddAttribute('statusValue', '0x%08x' % result.deleteStatus)
        if result.afterReboot:
            # Deletion is deferred until the next reboot.
            sub.AddAttribute('delay', 'true')
        else:
            sub.AddAttribute('delay', 'false')
        sysError = output.TranslateOsError(result.deleteStatus)
        sub.AddSubElementWithText('StatusString', sysError)
    output.RecordXml(xml)
    # Overall status reflects whether any individual deletion failed.
    if encounteredErrors:
        output.EndWithStatus(mcl.target.CALL_FAILED)
    else:
        output.EndWithStatus(mcl.target.CALL_SUCCEEDED)
    return True


if __name__ == '__main__':
    import sys
    try:
        # Exactly three arguments are expected; anything else raises here.
        namespace, InputFilename, OutputFilename = sys.argv[1:]
    except:
        # NOTE(review): bare except is over-broad, but left unchanged here.
        print '%s <namespace> <input filename> <output filename>' % sys.argv[0]
        sys.exit(1)
    if DataHandlerMain(namespace, InputFilename, OutputFilename) != True:
        sys.exit(-1)
protatremy/buildbot
refs/heads/master
master/buildbot/test/unit/test_data_connector.py
10
# This file is part of Buildbot. Buildbot is free software: you can # redistribute it and/or modify it under the terms of the GNU General Public # License as published by the Free Software Foundation, version 2. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Copyright Buildbot Team Members from __future__ import absolute_import from __future__ import print_function from future.builtins import range import mock from twisted.internet import defer from twisted.python import reflect from twisted.trial import unittest from buildbot.data import base from buildbot.data import connector from buildbot.data import exceptions from buildbot.data import resultspec from buildbot.data import types from buildbot.test.fake import fakemaster from buildbot.test.util import interfaces class Tests(interfaces.InterfaceTests): def setUp(self): raise NotImplementedError def test_signature_get(self): @self.assertArgSpecMatches(self.data.get) def get(self, path, filters=None, fields=None, order=None, limit=None, offset=None): pass def test_signature_getEndpoint(self): @self.assertArgSpecMatches(self.data.getEndpoint) def getEndpoint(self, path): pass def test_signature_control(self): @self.assertArgSpecMatches(self.data.control) def control(self, action, args, path): pass def test_signature_updates_addChange(self): @self.assertArgSpecMatches(self.data.updates.addChange) def addChange(self, files=None, comments=None, author=None, revision=None, when_timestamp=None, branch=None, category=None, revlink=u'', properties=None, repository=u'', codebase=None, project=u'', src=None): pass def 
test_signature_updates_masterActive(self): @self.assertArgSpecMatches(self.data.updates.masterActive) def masterActive(self, name, masterid): pass def test_signature_updates_masterStopped(self): @self.assertArgSpecMatches(self.data.updates.masterStopped) def masterStopped(self, name, masterid): pass def test_signature_updates_addBuildset(self): @self.assertArgSpecMatches(self.data.updates.addBuildset) def addBuildset(self, waited_for, scheduler=None, sourcestamps=None, reason='', properties=None, builderids=None, external_idstring=None, parent_buildid=None, parent_relationship=None): pass def test_signature_updates_maybeBuildsetComplete(self): @self.assertArgSpecMatches(self.data.updates.maybeBuildsetComplete) def maybeBuildsetComplete(self, bsid): pass def test_signature_updates_updateBuilderList(self): @self.assertArgSpecMatches(self.data.updates.updateBuilderList) def updateBuilderList(self, masterid, builderNames): pass class TestFakeData(unittest.TestCase, Tests): def setUp(self): self.master = fakemaster.make_master(testcase=self, wantMq=True, wantData=True, wantDb=True) self.data = self.master.data class TestDataConnector(unittest.TestCase, Tests): def setUp(self): self.master = fakemaster.make_master(testcase=self, wantMq=True) self.data = connector.DataConnector() self.data.setServiceParent(self.master) class DataConnector(unittest.TestCase): def setUp(self): self.master = fakemaster.make_master() # don't load by default self.patch(connector.DataConnector, 'submodules', []) self.data = connector.DataConnector() self.data.setServiceParent(self.master) def patchFooPattern(self): cls = type('FooEndpoint', (base.Endpoint,), {}) ep = cls(None, self.master) ep.get = mock.Mock(name='FooEndpoint.get') ep.get.return_value = defer.succeed({'val': 9999}) self.data.matcher[('foo', 'n:fooid', 'bar')] = ep return ep def patchFooListPattern(self): cls = type('FoosEndpoint', (base.Endpoint,), {}) ep = cls(None, self.master) ep.get = mock.Mock(name='FoosEndpoint.get') 
ep.get.return_value = defer.succeed( [{'val': v} for v in range(900, 920)]) self.data.matcher[('foo',)] = ep return ep # tests def test_sets_master(self): self.assertIdentical(self.master, self.data.master) def test_scanModule(self): # use this module as a test mod = reflect.namedModule('buildbot.test.unit.test_data_connector') self.data._scanModule(mod) # check that it discovered MyResourceType and updated endpoints match = self.data.matcher[('test', '10')] self.assertIsInstance(match[0], TestEndpoint) self.assertEqual(match[1], dict(testid=10)) match = self.data.matcher[('test', '10', 'p1')] self.assertIsInstance(match[0], TestEndpoint) match = self.data.matcher[('test', '10', 'p2')] self.assertIsInstance(match[0], TestEndpoint) match = self.data.matcher[('test',)] self.assertIsInstance(match[0], TestsEndpoint) self.assertEqual(match[1], dict()) match = self.data.matcher[('test', 'foo')] self.assertIsInstance(match[0], TestsEndpointSubclass) self.assertEqual(match[1], dict()) # and that it found the update method self.assertEqual(self.data.updates.testUpdate(), "testUpdate return") # and that it added the single root link self.assertEqual(self.data.rootLinks, [{'name': 'tests'}]) # and that it added an attribute self.assertIsInstance(self.data.rtypes.test, TestResourceType) def test_getEndpoint(self): ep = self.patchFooPattern() got = self.data.getEndpoint(('foo', '10', 'bar')) self.assertEqual(got, (ep, {'fooid': 10})) def test_getEndpoint_missing(self): self.assertRaises(exceptions.InvalidPathError, lambda: self.data.getEndpoint(('xyz',))) def test_get(self): ep = self.patchFooPattern() d = self.data.get(('foo', '10', 'bar')) @d.addCallback def check(gotten): self.assertEqual(gotten, {'val': 9999}) ep.get.assert_called_once_with(mock.ANY, {'fooid': 10}) return d def test_get_filters(self): ep = self.patchFooListPattern() d = self.data.get(('foo',), filters=[resultspec.Filter('val', 'lt', [902])]) @d.addCallback def check(gotten): self.assertEqual(gotten, 
base.ListResult( [{'val': 900}, {'val': 901}], total=2)) ep.get.assert_called_once_with(mock.ANY, {}) return d def test_get_resultSpec_args(self): ep = self.patchFooListPattern() f = resultspec.Filter('val', 'gt', [909]) d = self.data.get(('foo',), filters=[f], fields=['val'], order=['-val'], limit=2) @d.addCallback def check(gotten): self.assertEqual(gotten, base.ListResult( [{'val': 919}, {'val': 918}], total=10, limit=2)) ep.get.assert_called_once_with(mock.ANY, {}) return d def test_control(self): ep = self.patchFooPattern() ep.control = mock.Mock(name='MyEndpoint.control') ep.control.return_value = defer.succeed('controlled') d = self.data.control('foo!', {'arg': 2}, ('foo', '10', 'bar')) @d.addCallback def check(gotten): self.assertEqual(gotten, 'controlled') ep.control.assert_called_once_with('foo!', {'arg': 2}, {'fooid': 10}) return d # classes discovered by test_scanModule, above class TestsEndpoint(base.Endpoint): pathPatterns = "/test" rootLinkName = 'tests' class TestsEndpointParentClass(base.Endpoint): rootLinkName = 'shouldnt-see-this' class TestsEndpointSubclass(TestsEndpointParentClass): pathPatterns = "/test/foo" class TestEndpoint(base.Endpoint): pathPatterns = """ /test/n:testid /test/n:testid/p1 /test/n:testid/p2 """ class TestResourceType(base.ResourceType): name = 'test' endpoints = [TestsEndpoint, TestEndpoint, TestsEndpointSubclass] keyFields = ('testid', ) class EntityType(types.Entity): testid = types.Integer() entityType = EntityType(name) @base.updateMethod def testUpdate(self): return "testUpdate return"
jhawkesworth/ansible
refs/heads/devel
lib/ansible/plugins/doc_fragments/digital_ocean.py
44
# -*- coding: utf-8 -*- # Copyright: (c) 2018, Ansible Project # Copyright: (c) 2018, Abhijeet Kasurde (akasurde@redhat.com) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) class ModuleDocFragment(object): # Parameters for DigitalOcean modules DOCUMENTATION = r''' options: oauth_token: description: - DigitalOcean OAuth token. - "There are several other environment variables which can be used to provide this value." - "i.e., - 'DO_API_TOKEN', 'DO_API_KEY', 'DO_OAUTH_TOKEN' and 'OAUTH_TOKEN'" type: str aliases: [ api_token ] timeout: description: - The timeout in seconds used for polling DigitalOcean's API. type: int default: 30 validate_certs: description: - If set to C(no), the SSL certificates will not be validated. - This should only set to C(no) used on personally controlled sites using self-signed certificates. type: bool default: yes '''
LeXuZZ/localway_tests
refs/heads/master
wtframework/wtf/utils/tests/__init__.py
5
########################################################################## #This file is part of WTFramework. # # WTFramework is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # WTFramework is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with WTFramework. If not, see <http://www.gnu.org/licenses/>. ##########################################################################
samdoran/ansible
refs/heads/devel
lib/ansible/modules/packaging/os/portage.py
71
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2016, William L Thomson Jr # (c) 2013, Yap Sok Ann # Written by Yap Sok Ann <sokann@gmail.com> # Modified by William L. Thomson Jr. <wlt@o-sinc.com> # Based on apt module written by Matthew Williams <matthew@flowroute.com> # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: portage short_description: Package manager for Gentoo description: - Manages Gentoo packages version_added: "1.6" options: package: description: - Package atom or set, e.g. 
C(sys-apps/foo) or C(>foo-2.13) or C(@world) required: false default: null state: description: - State of the package atom required: false default: "present" choices: [ "present", "installed", "emerged", "absent", "removed", "unmerged", "latest" ] update: description: - Update packages to the best version available (--update) required: false default: no choices: [ "yes", "no" ] deep: description: - Consider the entire dependency tree of packages (--deep) required: false default: no choices: [ "yes", "no" ] newuse: description: - Include installed packages where USE flags have changed (--newuse) required: false default: no choices: [ "yes", "no" ] changed_use: description: - Include installed packages where USE flags have changed, except when - flags that the user has not enabled are added or removed - (--changed-use) required: false default: no choices: [ "yes", "no" ] version_added: 1.8 oneshot: description: - Do not add the packages to the world file (--oneshot) required: false default: False choices: [ "yes", "no" ] noreplace: description: - Do not re-emerge installed packages (--noreplace) required: false default: False choices: [ "yes", "no" ] nodeps: description: - Only merge packages but not their dependencies (--nodeps) required: false default: False choices: [ "yes", "no" ] onlydeps: description: - Only merge packages' dependencies but not the packages (--onlydeps) required: false default: False choices: [ "yes", "no" ] depclean: description: - Remove packages not needed by explicitly merged packages (--depclean) - If no package is specified, clean up the world's dependencies - Otherwise, --depclean serves as a dependency aware version of --unmerge required: false default: False choices: [ "yes", "no" ] quiet: description: - Run emerge in quiet mode (--quiet) required: false default: False choices: [ "yes", "no" ] verbose: description: - Run emerge in verbose mode (--verbose) required: false default: False choices: [ "yes", "no" ] sync: description: - Sync 
package repositories first - If yes, perform "emerge --sync" - If web, perform "emerge-webrsync" required: false default: null choices: [ "web", "yes", "no" ] getbinpkg: description: - Prefer packages specified at PORTAGE_BINHOST in make.conf required: false default: False choices: [ "yes", "no" ] usepkgonly: description: - Merge only binaries (no compiling). This sets getbinpkg=yes. required: false default: False choices: [ "yes", "no" ] keepgoing: description: - Continue as much as possible after an error. required: false default: False choices: [ "yes", "no" ] version_added: 2.3 jobs: description: - Specifies the number of packages to build simultaneously. required: false default: None version_added: 2.3 loadavg: description: - Specifies that no new builds should be started if there are - other builds running and the load average is at least LOAD required: false default: None version_added: 2.3 requirements: [ gentoolkit ] author: - "William L Thomson Jr (@wltjr)" - "Yap Sok Ann (@sayap)" - "Andrew Udvare" notes: [] ''' EXAMPLES = ''' # Make sure package foo is installed - portage: package: foo state: present # Make sure package foo is not installed - portage: package: foo state: absent # Update package foo to the "latest" version ( os specific alternative to latest ) - portage: package: foo update: yes # Install package foo using PORTAGE_BINHOST setup - portage: package: foo getbinpkg: yes # Re-install world from binary packages only and do not allow any compiling - portage: package: '@world' usepkgonly: yes # Sync repositories and update world - portage: package: '@world' update: yes deep: yes sync: yes # Remove unneeded packages - portage: depclean: yes # Remove package foo if it is not explicitly needed - portage: package: foo state: absent depclean: yes ''' import os import pipes import re def query_package(module, package, action): if package.startswith('@'): return query_set(module, package, action) return query_atom(module, package, action) def 
query_atom(module, atom, action): cmd = '%s list %s' % (module.equery_path, atom) rc, out, err = module.run_command(cmd) return rc == 0 def query_set(module, set, action): system_sets = [ '@live-rebuild', '@module-rebuild', '@preserved-rebuild', '@security', '@selected', '@system', '@world', '@x11-module-rebuild', ] if set in system_sets: if action == 'unmerge': module.fail_json(msg='set %s cannot be removed' % set) return False world_sets_path = '/var/lib/portage/world_sets' if not os.path.exists(world_sets_path): return False cmd = 'grep %s %s' % (set, world_sets_path) rc, out, err = module.run_command(cmd) return rc == 0 def sync_repositories(module, webrsync=False): if module.check_mode: module.exit_json(msg='check mode not supported by sync') if webrsync: webrsync_path = module.get_bin_path('emerge-webrsync', required=True) cmd = '%s --quiet' % webrsync_path else: cmd = '%s --sync --quiet --ask=n' % module.emerge_path rc, out, err = module.run_command(cmd) if rc != 0: module.fail_json(msg='could not sync package repositories') # Note: In the 3 functions below, equery is done one-by-one, but emerge is done # in one go. If that is not desirable, split the packages into multiple tasks # instead of joining them together with comma. 
def emerge_packages(module, packages): p = module.params if not (p['update'] or p['noreplace'] or p['state']=='latest'): for package in packages: if not query_package(module, package, 'emerge'): break else: module.exit_json(changed=False, msg='Packages already present.') if module.check_mode: module.exit_json(changed=True, msg='Packages would be installed.') args = [] emerge_flags = { 'update': '--update', 'deep': '--deep', 'newuse': '--newuse', 'changed_use': '--changed-use', 'oneshot': '--oneshot', 'noreplace': '--noreplace', 'nodeps': '--nodeps', 'onlydeps': '--onlydeps', 'quiet': '--quiet', 'verbose': '--verbose', 'getbinpkg': '--getbinpkg', 'usepkgonly': '--usepkgonly', 'usepkg': '--usepkg', 'keepgoing': '--keep-going', } for flag, arg in emerge_flags.items(): if p[flag]: args.append(arg) if p['state'] and p['state']=='latest': args.append("--update") if p['usepkg'] and p['usepkgonly']: module.fail_json(msg='Use only one of usepkg, usepkgonly') emerge_flags = { 'jobs': '--jobs=', 'loadavg': '--load-average ', } for flag, arg in emerge_flags.items(): if p[flag] is not None: args.append(arg + str(p[flag])) cmd, (rc, out, err) = run_emerge(module, packages, *args) if rc != 0: module.fail_json( cmd=cmd, rc=rc, stdout=out, stderr=err, msg='Packages not installed.', ) # Check for SSH error with PORTAGE_BINHOST, since rc is still 0 despite # this error if (p['usepkgonly'] or p['getbinpkg']) \ and 'Permission denied (publickey).' in err: module.fail_json( cmd=cmd, rc=rc, stdout=out, stderr=err, msg='Please check your PORTAGE_BINHOST configuration in make.conf ' 'and your SSH authorized_keys file', ) changed = True for line in out.splitlines(): if re.match(r'(?:>+) Emerging (?:binary )?\(1 of', line): msg = 'Packages installed.' break elif module.check_mode and re.match(r'\[(binary|ebuild)', line): msg = 'Packages would be installed.' break else: changed = False msg = 'No packages installed.' 
module.exit_json( changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err, msg=msg, ) def unmerge_packages(module, packages): p = module.params for package in packages: if query_package(module, package, 'unmerge'): break else: module.exit_json(changed=False, msg='Packages already absent.') args = ['--unmerge'] for flag in ['quiet', 'verbose']: if p[flag]: args.append('--%s' % flag) cmd, (rc, out, err) = run_emerge(module, packages, *args) if rc != 0: module.fail_json( cmd=cmd, rc=rc, stdout=out, stderr=err, msg='Packages not removed.', ) module.exit_json( changed=True, cmd=cmd, rc=rc, stdout=out, stderr=err, msg='Packages removed.', ) def cleanup_packages(module, packages): p = module.params if packages: for package in packages: if query_package(module, package, 'unmerge'): break else: module.exit_json(changed=False, msg='Packages already absent.') args = ['--depclean'] for flag in ['quiet', 'verbose']: if p[flag]: args.append('--%s' % flag) cmd, (rc, out, err) = run_emerge(module, packages, *args) if rc != 0: module.fail_json(cmd=cmd, rc=rc, stdout=out, stderr=err) removed = 0 for line in out.splitlines(): if not line.startswith('Number removed:'): continue parts = line.split(':') removed = int(parts[1].strip()) changed = removed > 0 module.exit_json( changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err, msg='Depclean completed.', ) def run_emerge(module, packages, *args): args = list(args) args.append('--ask=n') if module.check_mode: args.append('--pretend') cmd = [module.emerge_path] + args + packages return cmd, module.run_command(cmd) portage_present_states = ['present', 'emerged', 'installed', 'latest'] portage_absent_states = ['absent', 'unmerged', 'removed'] def main(): module = AnsibleModule( argument_spec=dict( package=dict(default=None, aliases=['name'], type='list'), state=dict( default=portage_present_states[0], choices=portage_present_states + portage_absent_states, ), update=dict(default=False, type='bool'), deep=dict(default=False, type='bool'), 
newuse=dict(default=False, type='bool'), changed_use=dict(default=False, type='bool'), oneshot=dict(default=False, type='bool'), noreplace=dict(default=False, type='bool'), nodeps=dict(default=False, type='bool'), onlydeps=dict(default=False, type='bool'), depclean=dict(default=False, type='bool'), quiet=dict(default=False, type='bool'), verbose=dict(default=False, type='bool'), sync=dict(default=None, choices=['yes', 'web', 'no']), getbinpkg=dict(default=False, type='bool'), usepkgonly=dict(default=False, type='bool'), usepkg=dict(default=False, type='bool'), keepgoing=dict(default=False, type='bool'), jobs=dict(default=None, type='int'), loadavg=dict(default=None, type='float'), ), required_one_of=[['package', 'sync', 'depclean']], mutually_exclusive=[['nodeps', 'onlydeps'], ['quiet', 'verbose']], supports_check_mode=True, ) module.emerge_path = module.get_bin_path('emerge', required=True) module.equery_path = module.get_bin_path('equery', required=True) p = module.params if p['sync'] and p['sync'].strip() != 'no': sync_repositories(module, webrsync=(p['sync'] == 'web')) if not p['package']: module.exit_json(msg='Sync successfully finished.') packages = [] if p['package']: packages.extend(p['package']) if p['depclean']: if packages and p['state'] not in portage_absent_states: module.fail_json( msg='Depclean can only be used with package when the state is ' 'one of: %s' % portage_absent_states, ) cleanup_packages(module, packages) elif p['state'] in portage_present_states: emerge_packages(module, packages) elif p['state'] in portage_absent_states: unmerge_packages(module, packages) # import module snippets from ansible.module_utils.basic import * if __name__ == '__main__': main()
lancezlin/ml_template_py
refs/heads/master
lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/contrib/appengine.py
360
from __future__ import absolute_import import logging import os import warnings from ..exceptions import ( HTTPError, HTTPWarning, MaxRetryError, ProtocolError, TimeoutError, SSLError ) from ..packages.six import BytesIO from ..request import RequestMethods from ..response import HTTPResponse from ..util.timeout import Timeout from ..util.retry import Retry try: from google.appengine.api import urlfetch except ImportError: urlfetch = None log = logging.getLogger(__name__) class AppEnginePlatformWarning(HTTPWarning): pass class AppEnginePlatformError(HTTPError): pass class AppEngineManager(RequestMethods): """ Connection manager for Google App Engine sandbox applications. This manager uses the URLFetch service directly instead of using the emulated httplib, and is subject to URLFetch limitations as described in the App Engine documentation here: https://cloud.google.com/appengine/docs/python/urlfetch Notably it will raise an AppEnginePlatformError if: * URLFetch is not available. * If you attempt to use this on GAEv2 (Managed VMs), as full socket support is available. * If a request size is more than 10 megabytes. * If a response size is more than 32 megabtyes. * If you use an unsupported request method such as OPTIONS. Beyond those cases, it will raise normal urllib3 errors. """ def __init__(self, headers=None, retries=None, validate_certificate=True): if not urlfetch: raise AppEnginePlatformError( "URLFetch is not available in this environment.") if is_prod_appengine_mvms(): raise AppEnginePlatformError( "Use normal urllib3.PoolManager instead of AppEngineManager" "on Managed VMs, as using URLFetch is not necessary in " "this environment.") warnings.warn( "urllib3 is using URLFetch on Google App Engine sandbox instead " "of sockets. 
To use sockets directly instead of URLFetch see " "https://urllib3.readthedocs.io/en/latest/contrib.html.", AppEnginePlatformWarning) RequestMethods.__init__(self, headers) self.validate_certificate = validate_certificate self.retries = retries or Retry.DEFAULT def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): # Return False to re-raise any potential exceptions return False def urlopen(self, method, url, body=None, headers=None, retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT, **response_kw): retries = self._get_retries(retries, redirect) try: response = urlfetch.fetch( url, payload=body, method=method, headers=headers or {}, allow_truncated=False, follow_redirects=( redirect and retries.redirect != 0 and retries.total), deadline=self._get_absolute_timeout(timeout), validate_certificate=self.validate_certificate, ) except urlfetch.DeadlineExceededError as e: raise TimeoutError(self, e) except urlfetch.InvalidURLError as e: if 'too large' in str(e): raise AppEnginePlatformError( "URLFetch request too large, URLFetch only " "supports requests up to 10mb in size.", e) raise ProtocolError(e) except urlfetch.DownloadError as e: if 'Too many redirects' in str(e): raise MaxRetryError(self, url, reason=e) raise ProtocolError(e) except urlfetch.ResponseTooLargeError as e: raise AppEnginePlatformError( "URLFetch response too large, URLFetch only supports" "responses up to 32mb in size.", e) except urlfetch.SSLCertificateError as e: raise SSLError(e) except urlfetch.InvalidMethodError as e: raise AppEnginePlatformError( "URLFetch does not support method: %s" % method, e) http_response = self._urlfetch_response_to_http_response( response, **response_kw) # Check for redirect response if (http_response.get_redirect_location() and retries.raise_on_redirect and redirect): raise MaxRetryError(self, url, "too many redirects") # Check if we should retry the HTTP response. 
if retries.is_forced_retry(method, status_code=http_response.status): retries = retries.increment( method, url, response=http_response, _pool=self) log.info("Forced retry: %s", url) retries.sleep() return self.urlopen( method, url, body=body, headers=headers, retries=retries, redirect=redirect, timeout=timeout, **response_kw) return http_response def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw): if is_prod_appengine(): # Production GAE handles deflate encoding automatically, but does # not remove the encoding header. content_encoding = urlfetch_resp.headers.get('content-encoding') if content_encoding == 'deflate': del urlfetch_resp.headers['content-encoding'] transfer_encoding = urlfetch_resp.headers.get('transfer-encoding') # We have a full response's content, # so let's make sure we don't report ourselves as chunked data. if transfer_encoding == 'chunked': encodings = transfer_encoding.split(",") encodings.remove('chunked') urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings) return HTTPResponse( # In order for decoding to work, we must present the content as # a file-like object. body=BytesIO(urlfetch_resp.content), headers=urlfetch_resp.headers, status=urlfetch_resp.status_code, **response_kw ) def _get_absolute_timeout(self, timeout): if timeout is Timeout.DEFAULT_TIMEOUT: return 5 # 5s is the default timeout for URLFetch. 
if isinstance(timeout, Timeout): if timeout._read is not timeout._connect: warnings.warn( "URLFetch does not support granular timeout settings, " "reverting to total timeout.", AppEnginePlatformWarning) return timeout.total return timeout def _get_retries(self, retries, redirect): if not isinstance(retries, Retry): retries = Retry.from_int( retries, redirect=redirect, default=self.retries) if retries.connect or retries.read or retries.redirect: warnings.warn( "URLFetch only supports total retries and does not " "recognize connect, read, or redirect retry parameters.", AppEnginePlatformWarning) return retries def is_appengine(): return (is_local_appengine() or is_prod_appengine() or is_prod_appengine_mvms()) def is_appengine_sandbox(): return is_appengine() and not is_prod_appengine_mvms() def is_local_appengine(): return ('APPENGINE_RUNTIME' in os.environ and 'Development/' in os.environ['SERVER_SOFTWARE']) def is_prod_appengine(): return ('APPENGINE_RUNTIME' in os.environ and 'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and not is_prod_appengine_mvms()) def is_prod_appengine_mvms(): return os.environ.get('GAE_VM', False) == 'true'
dasmithii/Stone
refs/heads/master
setup.py
1
import os from setuptools import setup def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() setup( name = "stone", version = "0.0.1", author = "Adam Smith", author_email = "dsmith2@wpi.edu", description = ("anonymous publishing via Tor onto the Blockchain"), license = "WTFPL", keywords = ['Tor', 'Bitcoin', 'BTC', 'anonymous', 'publishing', 'write'], packages=['stone'], long_description=read('README'), entry_points = { 'console_scripts': ['stone=stone.main:main'] } )
atulc007/sublime-github
refs/heads/master
lib/requests/api.py
637
# -*- coding: utf-8 -*- """ requests.api ~~~~~~~~~~~~ This module implements the Requests API. :copyright: (c) 2012 by Kenneth Reitz. :license: Apache2, see LICENSE for more details. """ from . import sessions def request(method, url, **kwargs): """Constructs and sends a :class:`Request <Request>`. Returns :class:`Response <Response>` object. :param method: method for the new :class:`Request` object. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param files: (optional) Dictionary of 'name': file-like-objects (or {'name': ('filename', fileobj)}) for multipart encoding upload. :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. :param timeout: (optional) Float describing the timeout of the request. :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed. :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided. :param stream: (optional) if ``False``, the response content will be immediately downloaded. :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. Usage:: >>> import requests >>> req = requests.request('GET', 'http://httpbin.org/get') <Response [200]> """ session = sessions.Session() return session.request(method=method, url=url, **kwargs) def get(url, **kwargs): """Sends a GET request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. 
:param \*\*kwargs: Optional arguments that ``request`` takes. """ kwargs.setdefault('allow_redirects', True) return request('get', url, **kwargs) def options(url, **kwargs): """Sends a OPTIONS request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. """ kwargs.setdefault('allow_redirects', True) return request('options', url, **kwargs) def head(url, **kwargs): """Sends a HEAD request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. """ kwargs.setdefault('allow_redirects', False) return request('head', url, **kwargs) def post(url, data=None, **kwargs): """Sends a POST request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. """ return request('post', url, data=data, **kwargs) def put(url, data=None, **kwargs): """Sends a PUT request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. """ return request('put', url, data=data, **kwargs) def patch(url, data=None, **kwargs): """Sends a PATCH request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. """ return request('patch', url, data=data, **kwargs) def delete(url, **kwargs): """Sends a DELETE request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. 
:param \*\*kwargs: Optional arguments that ``request`` takes. """ return request('delete', url, **kwargs)
peterm-itr/edx-platform
refs/heads/master
common/djangoapps/user_api/tests/test_constants.py
32
"""Constants used in the test suite. """ SORTED_COUNTRIES = [ (u'AF', u'Afghanistan'), (u'AL', u'Albania'), (u'DZ', u'Algeria'), (u'AS', u'American Samoa'), (u'AD', u'Andorra'), (u'AO', u'Angola'), (u'AI', u'Anguilla'), (u'AQ', u'Antarctica'), (u'AG', u'Antigua and Barbuda'), (u'AR', u'Argentina'), (u'AM', u'Armenia'), (u'AW', u'Aruba'), (u'AU', u'Australia'), (u'AT', u'Austria'), (u'AZ', u'Azerbaijan'), (u'BS', u'Bahamas'), (u'BH', u'Bahrain'), (u'BD', u'Bangladesh'), (u'BB', u'Barbados'), (u'BY', u'Belarus'), (u'BE', u'Belgium'), (u'BZ', u'Belize'), (u'BJ', u'Benin'), (u'BM', u'Bermuda'), (u'BT', u'Bhutan'), (u'BO', u'Bolivia, Plurinational State of'), (u'BQ', u'Bonaire, Sint Eustatius and Saba'), (u'BA', u'Bosnia and Herzegovina'), (u'BW', u'Botswana'), (u'BV', u'Bouvet Island'), (u'BR', u'Brazil'), (u'IO', u'British Indian Ocean Territory'), (u'BN', u'Brunei Darussalam'), (u'BG', u'Bulgaria'), (u'BF', u'Burkina Faso'), (u'BI', u'Burundi'), (u'KH', u'Cambodia'), (u'CM', u'Cameroon'), (u'CA', u'Canada'), (u'CV', u'Cape Verde'), (u'KY', u'Cayman Islands'), (u'CF', u'Central African Republic'), (u'TD', u'Chad'), (u'CL', u'Chile'), (u'CN', u'China'), (u'CX', u'Christmas Island'), (u'CC', u'Cocos (Keeling) Islands'), (u'CO', u'Colombia'), (u'KM', u'Comoros'), (u'CG', u'Congo'), (u'CD', u'Congo (the Democratic Republic of the)'), (u'CK', u'Cook Islands'), (u'CR', u'Costa Rica'), (u'HR', u'Croatia'), (u'CU', u'Cuba'), (u'CW', u'Cura\xe7ao'), (u'CY', u'Cyprus'), (u'CZ', u'Czech Republic'), (u'CI', u"C\xf4te d'Ivoire"), (u'DK', u'Denmark'), (u'DJ', u'Djibouti'), (u'DM', u'Dominica'), (u'DO', u'Dominican Republic'), (u'EC', u'Ecuador'), (u'EG', u'Egypt'), (u'SV', u'El Salvador'), (u'GQ', u'Equatorial Guinea'), (u'ER', u'Eritrea'), (u'EE', u'Estonia'), (u'ET', u'Ethiopia'), (u'FK', u'Falkland Islands [Malvinas]'), (u'FO', u'Faroe Islands'), (u'FJ', u'Fiji'), (u'FI', u'Finland'), (u'FR', u'France'), (u'GF', u'French Guiana'), (u'PF', u'French Polynesia'), (u'TF', u'French 
Southern Territories'), (u'GA', u'Gabon'), (u'GM', u'Gambia (The)'), (u'GE', u'Georgia'), (u'DE', u'Germany'), (u'GH', u'Ghana'), (u'GI', u'Gibraltar'), (u'GR', u'Greece'), (u'GL', u'Greenland'), (u'GD', u'Grenada'), (u'GP', u'Guadeloupe'), (u'GU', u'Guam'), (u'GT', u'Guatemala'), (u'GG', u'Guernsey'), (u'GN', u'Guinea'), (u'GW', u'Guinea-Bissau'), (u'GY', u'Guyana'), (u'HT', u'Haiti'), (u'HM', u'Heard Island and McDonald Islands'), (u'VA', u'Holy See [Vatican City State]'), (u'HN', u'Honduras'), (u'HK', u'Hong Kong'), (u'HU', u'Hungary'), (u'IS', u'Iceland'), (u'IN', u'India'), (u'ID', u'Indonesia'), (u'IR', u'Iran (the Islamic Republic of)'), (u'IQ', u'Iraq'), (u'IE', u'Ireland'), (u'IM', u'Isle of Man'), (u'IL', u'Israel'), (u'IT', u'Italy'), (u'JM', u'Jamaica'), (u'JP', u'Japan'), (u'JE', u'Jersey'), (u'JO', u'Jordan'), (u'KZ', u'Kazakhstan'), (u'KE', u'Kenya'), (u'KI', u'Kiribati'), (u'KP', u"Korea (the Democratic People's Republic of)"), (u'KR', u'Korea (the Republic of)'), (u'KW', u'Kuwait'), (u'KG', u'Kyrgyzstan'), (u'LA', u"Lao People's Democratic Republic"), (u'LV', u'Latvia'), (u'LB', u'Lebanon'), (u'LS', u'Lesotho'), (u'LR', u'Liberia'), (u'LY', u'Libya'), (u'LI', u'Liechtenstein'), (u'LT', u'Lithuania'), (u'LU', u'Luxembourg'), (u'MO', u'Macao'), (u'MK', u'Macedonia (the former Yugoslav Republic of)'), (u'MG', u'Madagascar'), (u'MW', u'Malawi'), (u'MY', u'Malaysia'), (u'MV', u'Maldives'), (u'ML', u'Mali'), (u'MT', u'Malta'), (u'MH', u'Marshall Islands'), (u'MQ', u'Martinique'), (u'MR', u'Mauritania'), (u'MU', u'Mauritius'), (u'YT', u'Mayotte'), (u'MX', u'Mexico'), (u'FM', u'Micronesia (the Federated States of)'), (u'MD', u'Moldova (the Republic of)'), (u'MC', u'Monaco'), (u'MN', u'Mongolia'), (u'ME', u'Montenegro'), (u'MS', u'Montserrat'), (u'MA', u'Morocco'), (u'MZ', u'Mozambique'), (u'MM', u'Myanmar'), (u'NA', u'Namibia'), (u'NR', u'Nauru'), (u'NP', u'Nepal'), (u'NL', u'Netherlands'), (u'NC', u'New Caledonia'), (u'NZ', u'New Zealand'), (u'NI', 
u'Nicaragua'), (u'NE', u'Niger'), (u'NG', u'Nigeria'), (u'NU', u'Niue'), (u'NF', u'Norfolk Island'), (u'MP', u'Northern Mariana Islands'), (u'NO', u'Norway'), (u'OM', u'Oman'), (u'PK', u'Pakistan'), (u'PW', u'Palau'), (u'PS', u'Palestine, State of'), (u'PA', u'Panama'), (u'PG', u'Papua New Guinea'), (u'PY', u'Paraguay'), (u'PE', u'Peru'), (u'PH', u'Philippines'), (u'PN', u'Pitcairn'), (u'PL', u'Poland'), (u'PT', u'Portugal'), (u'PR', u'Puerto Rico'), (u'QA', u'Qatar'), (u'RO', u'Romania'), (u'RU', u'Russian Federation'), (u'RW', u'Rwanda'), (u'RE', u'R\xe9union'), (u'BL', u'Saint Barth\xe9lemy'), (u'SH', u'Saint Helena, Ascension and Tristan da Cunha'), (u'KN', u'Saint Kitts and Nevis'), (u'LC', u'Saint Lucia'), (u'MF', u'Saint Martin (French part)'), (u'PM', u'Saint Pierre and Miquelon'), (u'VC', u'Saint Vincent and the Grenadines'), (u'WS', u'Samoa'), (u'SM', u'San Marino'), (u'ST', u'Sao Tome and Principe'), (u'SA', u'Saudi Arabia'), (u'SN', u'Senegal'), (u'RS', u'Serbia'), (u'SC', u'Seychelles'), (u'SL', u'Sierra Leone'), (u'SG', u'Singapore'), (u'SX', u'Sint Maarten (Dutch part)'), (u'SK', u'Slovakia'), (u'SI', u'Slovenia'), (u'SB', u'Solomon Islands'), (u'SO', u'Somalia'), (u'ZA', u'South Africa'), (u'GS', u'South Georgia and the South Sandwich Islands'), (u'SS', u'South Sudan'), (u'ES', u'Spain'), (u'LK', u'Sri Lanka'), (u'SD', u'Sudan'), (u'SR', u'Suriname'), (u'SJ', u'Svalbard and Jan Mayen'), (u'SZ', u'Swaziland'), (u'SE', u'Sweden'), (u'CH', u'Switzerland'), (u'SY', u'Syrian Arab Republic'), (u'TW', u'Taiwan'), (u'TJ', u'Tajikistan'), (u'TZ', u'Tanzania, United Republic of'), (u'TH', u'Thailand'), (u'TL', u'Timor-Leste'), (u'TG', u'Togo'), (u'TK', u'Tokelau'), (u'TO', u'Tonga'), (u'TT', u'Trinidad and Tobago'), (u'TN', u'Tunisia'), (u'TR', u'Turkey'), (u'TM', u'Turkmenistan'), (u'TC', u'Turks and Caicos Islands'), (u'TV', u'Tuvalu'), (u'UG', u'Uganda'), (u'UA', u'Ukraine'), (u'AE', u'United Arab Emirates'), (u'GB', u'United Kingdom'), (u'US', u'United 
States'), (u'UM', u'United States Minor Outlying Islands'), (u'UY', u'Uruguay'), (u'UZ', u'Uzbekistan'), (u'VU', u'Vanuatu'), (u'VE', u'Venezuela, Bolivarian Republic of'), (u'VN', u'Viet Nam'), (u'VG', u'Virgin Islands (British)'), (u'VI', u'Virgin Islands (U.S.)'), (u'WF', u'Wallis and Futuna'), (u'EH', u'Western Sahara'), (u'YE', u'Yemen'), (u'ZM', u'Zambia'), (u'ZW', u'Zimbabwe'), (u'AX', u'\xc5land Islands') ]
eric8810/openwrt
refs/heads/master
tools/b43-tools/files/b43-fwsquash.py
497
#!/usr/bin/env python # # b43 firmware file squasher # Removes unnecessary firmware files # # Copyright (c) 2009 Michael Buesch <mb@bu3sch.de> # # Licensed under the GNU/GPL version 2 or (at your option) any later version. # import sys import os def usage(): print("Usage: %s PHYTYPES COREREVS /path/to/extracted/firmware" % sys.argv[0]) print("") print("PHYTYPES is a comma separated list of:") print("A => A-PHY") print("AG => Dual A-PHY G-PHY") print("G => G-PHY") print("LP => LP-PHY") print("N => N-PHY") print("HT => HT-PHY") print("LCN => LCN-PHY") print("LCN40 => LCN40-PHY") print("AC => AC-PHY") print("") print("COREREVS is a comma separated list of core revision numbers.") if len(sys.argv) != 4: usage() sys.exit(1) phytypes = sys.argv[1] corerevs = sys.argv[2] fwpath = sys.argv[3] phytypes = phytypes.split(',') try: corerevs = map(lambda r: int(r), corerevs.split(',')) except ValueError: print("ERROR: \"%s\" is not a valid COREREVS string\n" % corerevs) usage() sys.exit(1) fwfiles = os.listdir(fwpath) fwfiles = filter(lambda str: str.endswith(".fw"), fwfiles) if not fwfiles: print("ERROR: No firmware files found in %s" % fwpath) sys.exit(1) required_fwfiles = [] def revs_match(revs_a, revs_b): for rev in revs_a: if rev in revs_b: return True return False def phytypes_match(types_a, types_b): for type in types_a: type = type.strip().upper() if type in types_b: return True return False revmapping = { "ucode2.fw" : ( (2,3,), ("G",), ), "ucode4.fw" : ( (4,), ("G",), ), "ucode5.fw" : ( (5,6,7,8,9,10,), ("G","A","AG",), ), "ucode11.fw" : ( (11,12,), ("N",), ), "ucode13.fw" : ( (13,), ("LP","G",), ), "ucode14.fw" : ( (14,), ("LP",), ), "ucode15.fw" : ( (15,), ("LP",), ), "ucode16_mimo.fw" : ( (16,17,18,19,23,), ("N",), ), # "ucode16_lp.fw" : ( (16,17,18,19,), ("LP",), ), "ucode24_lcn.fw" : ( (24,), ("LCN",), ), "ucode25_mimo.fw" : ( (25,28,), ("N",), ), "ucode25_lcn.fw" : ( (25,28,), ("LCN",), ), "ucode26_mimo.fw" : ( (26,), ("HT",), ), "ucode29_mimo.fw" : ( (29,), 
("HT",), ), "ucode30_mimo.fw" : ( (30,), ("N",), ), "ucode33_lcn40.fw" : ( (33,), ("LCN40",), ), "ucode40.fw" : ( (40,), ("AC",), ), "ucode42.fw" : ( (42,), ("AC",), ), "pcm4.fw" : ( (1,2,3,4,), ("G",), ), "pcm5.fw" : ( (5,6,7,8,9,10,), ("G","A","AG",), ), } initvalmapping = { "a0g1initvals5.fw" : ( (5,6,7,8,9,10,), ("AG",), ), "a0g0initvals5.fw" : ( (5,6,7,8,9,10,), ("A", "AG",), ), "b0g0initvals2.fw" : ( (2,4,), ("G",), ), "b0g0initvals5.fw" : ( (5,6,7,8,9,10,), ("G",), ), "b0g0initvals13.fw" : ( (13,), ("G",), ), "n0initvals11.fw" : ( (11,12,), ("N",), ), "n0initvals16.fw" : ( (16,17,18,23,), ("N",), ), "n0initvals24.fw" : ( (24,), ("N",), ), "n0initvals25.fw" : ( (25,28,), ("N",), ), "n16initvals30.fw" : ( (30,), ("N",), ), "lp0initvals13.fw" : ( (13,), ("LP",), ), "lp0initvals14.fw" : ( (14,), ("LP",), ), "lp0initvals15.fw" : ( (15,), ("LP",), ), # "lp0initvals16.fw" : ( (16,17,18,), ("LP",), ), "lcn0initvals24.fw" : ( (24,), ("LCN",), ), "ht0initvals26.fw" : ( (26,), ("HT",), ), "ht0initvals29.fw" : ( (29,), ("HT",), ), "lcn400initvals33.fw" : ( (33,), ("LCN40",), ), "ac0initvals40.fw" : ( (40,), ("AC",), ), "ac1initvals42.fw" : ( (42,), ("AC",), ), "a0g1bsinitvals5.fw" : ( (5,6,7,8,9,10,), ("AG",), ), "a0g0bsinitvals5.fw" : ( (5,6,7,8,9,10,), ("A", "AG"), ), "b0g0bsinitvals5.fw" : ( (5,6,7,8,9,10,), ("G",), ), "n0bsinitvals11.fw" : ( (11,12,), ("N",), ), "n0bsinitvals16.fw" : ( (16,17,18,23,), ("N",), ), "n0bsinitvals24.fw" : ( (24,), ("N",), ), "n0bsinitvals25.fw" : ( (25,28,), ("N",), ), "n16bsinitvals30.fw" : ( (30,), ("N",), ), "lp0bsinitvals13.fw" : ( (13,), ("LP",), ), "lp0bsinitvals14.fw" : ( (14,), ("LP",), ), "lp0bsinitvals15.fw" : ( (15,), ("LP",), ), # "lp0bsinitvals16.fw" : ( (16,17,18,), ("LP",), ), "lcn0bsinitvals24.fw" : ( (24,), ("LCN",), ), "ht0bsinitvals26.fw" : ( (26,), ("HT",), ), "ht0bsinitvals29.fw" : ( (29,), ("HT",), ), "lcn400bsinitvals33.fw" : ( (33,), ("LCN40",), ), "ac0bsinitvals40.fw" : ( (40,), ("AC",), ), "ac1bsinitvals42.fw" : 
( (42,), ("AC",), ), } for f in fwfiles: if f in revmapping: if revs_match(corerevs, revmapping[f][0]) and\ phytypes_match(phytypes, revmapping[f][1]): required_fwfiles += [f] continue if f in initvalmapping: if revs_match(corerevs, initvalmapping[f][0]) and\ phytypes_match(phytypes, initvalmapping[f][1]): required_fwfiles += [f] continue print("WARNING: Firmware file %s not found in the mapping lists" % f) for f in fwfiles: if f not in required_fwfiles: print("Deleting %s" % f) os.unlink(fwpath + '/' + f)
kenshay/ImageScript
refs/heads/master
ProgramData/SystemFiles/Python/Lib/json/tests/test_pass1.py
108
from json.tests import PyTest, CTest # from http://json.org/JSON_checker/test/pass1.json JSON = r''' [ "JSON Test Pattern pass1", {"object with 1 member":["array with 1 element"]}, {}, [], -42, true, false, null, { "integer": 1234567890, "real": -9876.543210, "e": 0.123456789e-12, "E": 1.234567890E+34, "": 23456789012E66, "zero": 0, "one": 1, "space": " ", "quote": "\"", "backslash": "\\", "controls": "\b\f\n\r\t", "slash": "/ & \/", "alpha": "abcdefghijklmnopqrstuvwyz", "ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ", "digit": "0123456789", "0123456789": "digit", "special": "`1~!@#$%^&*()_+-={':[,]}|;.</>?", "hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A", "true": true, "false": false, "null": null, "array":[ ], "object":{ }, "address": "50 St. James Street", "url": "http://www.JSON.org/", "comment": "// /* <!-- --", "# -- --> */": " ", " s p a c e d " :[1,2 , 3 , 4 , 5 , 6 ,7 ],"compact":[1,2,3,4,5,6,7], "jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}", "quotes": "&#34; \u0022 %22 0x22 034 &#x22;", "\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?" : "A key can be any string" }, 0.5 ,98.6 , 99.44 , 1066, 1e1, 0.1e1, 1e-1, 1e00,2e+00,2e-00 ,"rosebud"] ''' class TestPass1(object): def test_parse(self): # test in/out equivalence and parsing res = self.loads(JSON) out = self.dumps(res) self.assertEqual(res, self.loads(out)) class TestPyPass1(TestPass1, PyTest): pass class TestCPass1(TestPass1, CTest): pass
darkChozo/FAbot
refs/heads/master
bot/event_manager.py
1
# -*- coding: utf-8 -*-
import datetime
from pytz import utc, timezone
import threading


class EventManager(object):
    """Schedules chat announcements ahead of recurring weekly events.

    Events are ``(name, weekday, hour, minute)`` tuples expressed in
    ``self.timezone``; warnings are ``(message suffix, lead time)`` pairs.
    At most one ``threading.Timer`` is armed at a time, for the earliest
    warning of the next upcoming event.
    """

    def __init__(self):
        # TODO: Move events to config.ini
        # (name, weekday [0 = Monday .. 6 = Sunday], hour, minute)
        self.events = (
            ("The Folk ARPS Sunday Session", 6, 19, 20),
            ("The Folk ARPS Tuesday Session", 1, 19, 20)
        )
        # (message suffix, how long before the event it should fire)
        self.warnings = (
            (" starts in five hours!", datetime.timedelta(hours=5)),
            (" starts in two hours!", datetime.timedelta(hours=2)),
            (" starts in thirty minutes!", datetime.timedelta(minutes=30)),
            (" is starting!", datetime.timedelta(0))
        )
        self.timezone = timezone("Europe/London")
        self.nextEvent = None  # (name, aware UTC datetime) of the next event
        self.timer = None      # currently armed threading.Timer, if any
        self.announcement_channels = []

    def handle_message(self, cli):
        """Arm a timer for the next pending warning if none is armed.

        :param cli: chat client used later to send the announcement
        """
        if self.timer is None:
            self.find_next_event()
            if self.nextEvent is not None:
                # Evaluate "now" once so the membership test and the delay
                # computation agree on the same instant.
                now = utc.localize(datetime.datetime.utcnow())
                for warning in self.warnings:
                    # Warnings are ordered longest lead time first; pick the
                    # first one whose trigger moment is still in the future.
                    if self.nextEvent[1] - warning[1] > now:
                        seconds = (self.nextEvent[1] - warning[1] - now).total_seconds()
                        self.timer = threading.Timer(
                            seconds, self.handle_timer,
                            args=[cli, "@everyone " + self.nextEvent[0] + warning[0]])
                        self.timer.start()
                        # Parenthesized single-expression print: identical
                        # output on Python 2, valid syntax on Python 3.
                        print("created " + str(seconds) + "s timer for " + str(self.nextEvent))
                        break

    def handle_timer(self, cli, message):
        """Timer callback: broadcast *message* to all announcement channels."""
        # Bug fix: the original concatenated an adjacent empty string
        # ('printing """') instead of an opening quote, so the message was
        # printed as:  printing msg"  — wrap it in both quotes.
        print('timer complete, printing "' + message + '"')
        self.timer = None
        for channel in self.announcement_channels:
            cli.send_message(cli.get_channel(channel), message)

    def find_next_event(self):
        """Set ``self.nextEvent`` to the soonest upcoming event, or None."""
        self.nextEvent = None
        now = utc.localize(datetime.datetime.utcnow())
        for event in self.events:
            # Localize in the configured zone first so DST is respected,
            # then convert to UTC for comparison and scheduling.
            t = self.timezone.localize(
                datetime.datetime.combine(self.next_weekday(event[1]),
                                          datetime.time(event[2], event[3])))
            t = t.astimezone(utc)
            if t > now and (self.nextEvent is None or t < self.nextEvent[1]):
                self.nextEvent = (event[0], t)

    def next_weekday(self, weekday):
        """Return the next date falling on *weekday* (0 = Monday), today included."""
        d = datetime.datetime.utcnow().date()
        days_ahead = (weekday - d.weekday()) % 7
        return d + datetime.timedelta(days_ahead)

    def next_event_message(self):
        """Return a human-readable description of the next event.

        Assumes ``find_next_event`` has been called and found an event.
        """
        return "Next event is {} in {} ({}).".format(
            self.nextEvent[0],
            self.format_delta(self.nextEvent[1] - utc.localize(datetime.datetime.utcnow())),
            self.nextEvent[1].strftime("%H:%M UTC on %B %d"))

    def format_delta(self, td):
        """Format a timedelta as e.g. ``'1 day, 2 hours and 5 minutes'``.

        Days and hours are omitted when every more-significant field is zero;
        seconds are always discarded.
        """
        days, remainder = divmod(td.total_seconds(), 86400)
        hours, remainder = divmod(remainder, 3600)
        minutes, seconds = divmod(remainder, 60)
        output = ''
        if days != 0:
            output += '{:.0f}'.format(days) + (' days, ' if days != 1 else ' day, ')
        if days != 0 or hours != 0:
            output += '{:.0f}'.format(hours) + (' hours and ' if hours != 1 else ' hour and ')
        output += '{:.0f}'.format(minutes) + (' minutes' if minutes != 1 else ' minute')
        return output
mopsalarm/rep0st
refs/heads/master
rep0st/download.py
1
"""Crawler for the pr0gramm items API: mirrors post metadata into a local
sqlite database (via ``dataset``) and downloads the static images, using a
gevent pool for concurrent fetches."""
from __future__ import division
from __future__ import unicode_literals

import itertools

import gevent
import gevent.pool
import gevent.monkey
import dataset
import requests
import logbook
import pathlib
from clint.textui import progress
from first import first


class Post(object):
    """A single pr0gramm post as returned by the items API."""

    def __init__(self, id, created, user, flags, image):
        """
        :param int id: The id of the post
        :param int created: UTC timestamp of the creation time of the post in seconds
        :param user: Name of the user who posted this item
        :param flags: Content flags of the post (sfw/nsfw bitmask from the API)
        :param image: Linked post image
        """
        self.id = id
        self.user = user
        self.image = image
        self.flags = flags
        self.created = created

    @property
    def animated(self):
        # Videos and gifs cannot be fingerprinted like static images.
        return self.image.endswith((".webm", ".gif"))

    @property
    def static(self):
        return self.image.endswith((".png", ".jpg"))

    @property
    def local_image(self):
        """Returns the local path of the posts image or video"""
        return pathlib.Path("images") / self.image

    def as_dict(self):
        """Converts this post into a dict. The dict can be read back using __init__"""
        return dict(id=self.id, user=self.user, image=self.image, flags=self.flags, created=self.created)


def chunks(iterable, size=100):
    """Yields chunks of the given size as tuples; the last chunk may be shorter."""
    it = iter(iterable)
    chunk = tuple(itertools.islice(it, size))
    while chunk:
        yield chunk
        chunk = tuple(itertools.islice(it, size))


def iter_api_posts(start=None):
    """Iterates over all posts that the api provides, starting at the post
    with the given id and walking towards older posts.
    """
    at_end = False
    # id 1 is the oldest possible post, so reaching it means we are done.
    while not at_end and start != 1:
        # build url for next page
        url = "http://pr0gramm.com/api/items/get?flags=3"
        if start is not None:
            url += "&older=%d" % start

        # perform api request
        #: :type: requests.Response
        logbook.debug("requesting api page {}", url)
        response = requests.get(url)
        response.raise_for_status()

        # parse response
        data = response.json()
        at_end = data["atEnd"]

        # create posts; remember the last id so the next page continues there
        for item in data.get("items", ()):
            post = Post(item["id"], item["created"], item["user"], item["flags"], item["image"])
            start = post.id
            yield post


def download_posts(db, start=None, tablename="posts"):
    """Downloads all posts and adds them to the given datasets posts table.
    Download will stop if an already existing post is reached.

    :param dataset.Database db: The dataset to use
    :param start: Optional post id to start crawling from (exclusive)
    :param tablename: Name of the table to insert the posts into
    """
    # One transaction per 100-post chunk keeps sqlite write overhead low.
    for chunk in chunks(iter_api_posts(start), 100):
        with db as tx:
            posts_table = tx[tablename]
            for post in chunk:
                # Deliberate early exit: an already stored post means all
                # older posts are stored too, so the crawl can stop here.
                if posts_table.find_one(id=post.id):
                    return
                posts_table.insert(post.as_dict())


def download_image(post):
    """Downloads the image of a post, if it was not yet downloaded.

    :param Post post: The post to download the image for
    """
    image_path = post.local_image
    if image_path.exists():
        return

    url = "http://img.pr0gramm.com/" + post.image

    #: :type: requests.Response
    response = requests.get(url)
    response.raise_for_status()

    # really download the image
    content = response.content
    with create_open(image_path) as fp:
        fp.write(content)


def create_open(path):
    """Ensures that the file at the given path does not exist and ensures
    that the parent directory is created; returns the file opened for
    binary writing.
    """
    path = pathlib.Path(path)
    parent = path.parent
    if not parent.exists():
        parent.mkdir(parents=True)
    if path.exists():
        raise IOError("file already exists")
    return path.open("wb")


def db_posts(table):
    """Iterates over all posts in the database, newest first."""
    return (Post(**p) for p in table.find(order_by="-id", _step=None))


def download_images(posts):
    """Downloads images for the given posts using a bounded gevent pool."""
    # presumably 16 concurrent fetches is a politeness limit — TODO confirm
    pool = gevent.pool.Pool(size=16)
    for post in progress.bar(posts, width=60, every=100):
        # only static images are mirrored; webm/gif are skipped
        if not post.static:
            continue
        pool.spawn(download_image, post)

    # wait for all jobs to finish
    pool.join()


def open_dataset():
    """Opens the database"""
    db = dataset.connect("sqlite:///rep0st.db")
    # wait up to 2.5s on a locked database instead of failing immediately
    db.query("PRAGMA busy_timeout=2500;")
    return db


def main():
    db = open_dataset()

    # download new posts
    download_posts(db)

    # get the oldest post and download even older posts
    posts_table = db["posts"]
    oldest_post = first(posts_table.find(order_by="id", _limit=1))
    if oldest_post and oldest_post["id"] != 1:
        download_posts(db, start=oldest_post["id"])

    # get a list of all the posts
    posts = list(db_posts(db["posts"]))

    # download images
    download_images(posts)


if __name__ == '__main__':
    # monkey-patching must happen before any sockets are created so that
    # requests' blocking I/O cooperates with the gevent pool
    gevent.monkey.patch_all()
    main()
dacohen/kops
refs/heads/master
vendor/k8s.io/kubernetes/examples/cluster-dns/images/frontend/client.py
504
#!/usr/bin/env python

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tiny client for the cluster-DNS example: resolves a service address via
DNS and then fetches it over HTTP, printing both results."""

import argparse
import socket

import requests

# Import compatible with both Python 2 and Python 3 (the original was
# Python-2-only via `from urlparse import urlparse`).
try:
    from urlparse import urlparse  # Python 2
except ImportError:
    from urllib.parse import urlparse  # Python 3


def CheckServiceAddress(address):
    """Resolve the hostname of *address* through cluster DNS and print the IP.

    Raises socket.gaierror if the service name does not resolve.
    """
    hostname = urlparse(address).hostname
    service_address = socket.gethostbyname(hostname)
    # Single-expression parenthesized print: same output on Python 2 and 3.
    print(service_address)


def GetServerResponse(address):
    """GET *address* and print the response object and its body."""
    # '%s' formatting reproduces the original `print 'Send request to:', address`
    # output exactly while staying valid on Python 3.
    print('Send request to: %s' % address)
    response = requests.get(address)
    print(response)
    print(response.content)


def Main():
    """Parse the target address from argv, then resolve and fetch it."""
    parser = argparse.ArgumentParser()
    parser.add_argument('address')
    args = parser.parse_args()
    CheckServiceAddress(args.address)
    GetServerResponse(args.address)


if __name__ == "__main__":
    Main()
graphql-python/graphql-core
refs/heads/main
tests/validation/test_unique_variable_names.py
1
from functools import partial

from graphql.validation import UniqueVariableNamesRule

from .harness import assert_validation_errors

# Bind the rule under test once; assert_valid is the same check with the
# additional expectation that no errors are produced.
assert_errors = partial(assert_validation_errors, UniqueVariableNamesRule)

assert_valid = partial(assert_errors, errors=[])


# pytest-describe style: the outer describe_* function groups the nested
# test functions, which are collected by name.
def describe_validate_unique_variable_names():
    def unique_variable_names():
        # The same variable name may be reused across *different* operations.
        # NOTE(review): the whitespace inside these literals looks collapsed
        # relative to the upstream file; the expected error locations below
        # depend on the literal's exact layout — confirm against upstream.
        assert_valid(
            """
 query A($x: Int, $y: String) { __typename } query B($x: String, $y: Int) { __typename } """
        )

    def duplicate_variable_names():
        # Each extra occurrence of $x within one operation is reported once,
        # paired with the first occurrence's location.
        assert_errors(
            """
 query A($x: Int, $x: Int, $x: String) { __typename } query B($x: String, $x: Int) { __typename } query C($x: Int, $x: Int) { __typename } """,
            [
                {
                    "message": "There can be only one variable named '$x'.",
                    "locations": [(2, 22), (2, 31)],
                },
                {
                    "message": "There can be only one variable named '$x'.",
                    "locations": [(2, 22), (2, 40)],
                },
                {
                    "message": "There can be only one variable named '$x'.",
                    "locations": [(3, 22), (3, 34)],
                },
                {
                    "message": "There can be only one variable named '$x'.",
                    "locations": [(4, 22), (4, 31)],
                },
            ],
        )
googleapis/python-billing
refs/heads/master
google/cloud/billing_v1/services/cloud_catalog/client.py
1
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict from distutils import util import os import re from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib # type: ignore from google.api_core import exceptions as core_exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.cloud.billing_v1.services.cloud_catalog import pagers from google.cloud.billing_v1.types import cloud_catalog from .transports.base import CloudCatalogTransport, DEFAULT_CLIENT_INFO from .transports.grpc import CloudCatalogGrpcTransport from .transports.grpc_asyncio import CloudCatalogGrpcAsyncIOTransport class CloudCatalogClientMeta(type): """Metaclass for the CloudCatalog client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. 
""" _transport_registry = OrderedDict() # type: Dict[str, Type[CloudCatalogTransport]] _transport_registry["grpc"] = CloudCatalogGrpcTransport _transport_registry["grpc_asyncio"] = CloudCatalogGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[CloudCatalogTransport]: """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class CloudCatalogClient(metaclass=CloudCatalogClientMeta): """A catalog of Google Cloud Platform services and SKUs. Provides pricing information and metadata on Google Cloud Platform services and SKUs. """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Converts api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "cloudbilling.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials info. Args: info (dict): The service account private key info. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: CloudCatalogClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_info(info) kwargs["credentials"] = credentials return cls(*args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: CloudCatalogClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @property def transport(self) -> CloudCatalogTransport: """Returns the transport used by the client instance. Returns: CloudCatalogTransport: The transport used by the client instance. 
""" return self._transport @staticmethod def service_path(service: str,) -> str: """Returns a fully-qualified service string.""" return "services/{service}".format(service=service,) @staticmethod def parse_service_path(path: str) -> Dict[str, str]: """Parses a service path into its component segments.""" m = re.match(r"^services/(?P<service>.+?)$", path) return m.groupdict() if m else {} @staticmethod def sku_path(service: str, sku: str,) -> str: """Returns a fully-qualified sku string.""" return "services/{service}/skus/{sku}".format(service=service, sku=sku,) @staticmethod def parse_sku_path(path: str) -> Dict[str, str]: """Parses a sku path into its component segments.""" m = re.match(r"^services/(?P<service>.+?)/skus/(?P<sku>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_billing_account_path(billing_account: str,) -> str: """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @staticmethod def parse_common_billing_account_path(path: str) -> Dict[str, str]: """Parse a billing_account path into its component segments.""" m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_folder_path(folder: str,) -> str: """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod def parse_common_folder_path(path: str) -> Dict[str, str]: """Parse a folder path into its component segments.""" m = re.match(r"^folders/(?P<folder>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_organization_path(organization: str,) -> str: """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod def parse_common_organization_path(path: str) -> Dict[str, str]: """Parse a organization path into its component segments.""" m = 
re.match(r"^organizations/(?P<organization>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_project_path(project: str,) -> str: """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod def parse_common_project_path(path: str) -> Dict[str, str]: """Parse a project path into its component segments.""" m = re.match(r"^projects/(?P<project>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_location_path(project: str, location: str,) -> str: """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @staticmethod def parse_common_location_path(path: str) -> Dict[str, str]: """Parse a location path into its component segments.""" m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) return m.groupdict() if m else {} def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, CloudCatalogTransport, None] = None, client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the cloud catalog client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, CloudCatalogTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint) and "auto" (auto switch to the default mTLS endpoint if client certificate is present, this is the default value). However, the ``api_endpoint`` property takes precedence if provided. (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used to provide client certificate for mutual TLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ if isinstance(client_options, dict): client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() # Create SSL credentials for mutual TLS if needed. use_client_cert = bool( util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) ) client_cert_source_func = None is_mtls = False if use_client_cert: if client_options.client_cert_source: is_mtls = True client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() if is_mtls: client_cert_source_func = mtls.default_client_cert_source() else: client_cert_source_func = None # Figure out which api endpoint to use. 
if client_options.api_endpoint is not None: api_endpoint = client_options.api_endpoint else: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") if use_mtls_env == "never": api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": if is_mtls: api_endpoint = self.DEFAULT_MTLS_ENDPOINT else: api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " "values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, CloudCatalogTransport): # transport is a CloudCatalogTransport instance. if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) if client_options.scopes: raise ValueError( "When providing a transport instance, provide its scopes " "directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=api_endpoint, scopes=client_options.scopes, client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, ) def list_services( self, request: cloud_catalog.ListServicesRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListServicesPager: r"""Lists all public cloud services. Args: request (google.cloud.billing_v1.types.ListServicesRequest): The request object. Request message for `ListServices`. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.billing_v1.services.cloud_catalog.pagers.ListServicesPager: Response message for ListServices. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a cloud_catalog.ListServicesRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, cloud_catalog.ListServicesRequest): request = cloud_catalog.ListServicesRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.list_services] # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListServicesPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def list_skus( self, request: cloud_catalog.ListSkusRequest = None, *, parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListSkusPager: r"""Lists all publicly available SKUs for a given cloud service. Args: request (google.cloud.billing_v1.types.ListSkusRequest): The request object. Request message for `ListSkus`. parent (str): Required. The name of the service. Example: "services/DA34-426B-A397" This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.billing_v1.services.cloud_catalog.pagers.ListSkusPager: Response message for ListSkus. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a cloud_catalog.ListSkusRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, cloud_catalog.ListSkusRequest): request = cloud_catalog.ListSkusRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.list_skus] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListSkusPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. 
return response try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution("google-cloud-billing",).version, ) except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() __all__ = ("CloudCatalogClient",)
LandRegistry/service-frontend-alpha
refs/heads/master
application/services/ownership.py
1
import logging
import requests
import uuid
import json
import sys
from requests.exceptions import (
    HTTPError,
    ConnectionError
)
from flask import session
from application import app

# Base URL of the external ownership-checking service, injected via app config.
OWNERSHIP_URL = app.config['OWNERSHIP_URL']

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())


def check_user_is_owner(user, title_number):
    """Return True if the logged-in user is an owner of ``title_number``.

    POSTs the title number to the ownership service and compares the
    ``lrid`` of every returned owner against the ``lrid`` stored in the
    Flask session. Returns False when the session has no ``lrid``, when
    no owner matches, when the owner list is empty, or when the service
    call fails for any reason (this check is deliberately best-effort).

    :param user: display identifier of the current user (used for logging
        only; the authoritative identity is ``session['lrid']``).
    :param title_number: the land-registry title number to check.
    :return: bool
    """
    logger.info("Checking title number %s ownership service at %s"
                % (title_number, OWNERSHIP_URL))

    if 'lrid' not in session:
        logger.info("LRID not known for user %s so can't check ownership"
                    % user)
        return False

    try:
        headers = {'Content-type': 'application/json'}
        payload = json.dumps({"title_number": title_number})
        resp = requests.post(
            url='%s/owners' % OWNERSHIP_URL,
            data=payload,
            headers=headers)
        resp.raise_for_status()

        owners = resp.json()['owners']
        logger.info('Response owners %s' % owners)

        # BUG FIX: the previous implementation returned False as soon as the
        # FIRST owner in the list did not match, so a user listed second or
        # later was never recognized as an owner (and an empty owner list
        # fell off the end of the function, returning None). Check every
        # owner and also return an explicit bool for the empty case.
        return any(session['lrid'] == match['lrid'] for match in owners)

    except (HTTPError, ConnectionError) as e:
        logger.info('Unable to establish ownership of %s by %s: error %s'
                    % (title_number, user, e))
        return False
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; still best-effort for anything else
        # (e.g. malformed JSON, missing 'owners'/'lrid' keys).
        e = sys.exc_info()[0]
        logger.info('Unknown error checking ownership of %s by %s: %s'
                    % (title_number, user, e))
        return False
shikhardb/scikit-learn
refs/heads/master
sklearn/linear_model/stochastic_gradient.py
8
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> (main author) # Mathieu Blondel (partial_fit support) # # License: BSD 3 clause """Classification and regression using Stochastic Gradient Descent (SGD).""" import numpy as np import scipy.sparse as sp from abc import ABCMeta, abstractmethod from ..externals.joblib import Parallel, delayed from .base import LinearClassifierMixin, SparseCoefMixin from ..base import BaseEstimator, RegressorMixin from ..feature_selection.from_model import _LearntSelectorMixin from ..utils import (check_array, check_random_state, check_X_y, deprecated) from ..utils.extmath import safe_sparse_dot from ..utils.multiclass import _check_partial_fit_first_call from ..utils.validation import check_is_fitted from ..externals import six from .sgd_fast import plain_sgd, average_sgd from ..utils.seq_dataset import ArrayDataset, CSRDataset from ..utils import compute_class_weight from .sgd_fast import Hinge from .sgd_fast import SquaredHinge from .sgd_fast import Log from .sgd_fast import ModifiedHuber from .sgd_fast import SquaredLoss from .sgd_fast import Huber from .sgd_fast import EpsilonInsensitive from .sgd_fast import SquaredEpsilonInsensitive LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3, "pa1": 4, "pa2": 5} PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3} SPARSE_INTERCEPT_DECAY = 0.01 """For sparse data intercept updates are scaled by this decay factor to avoid intercept oscillation.""" DEFAULT_EPSILON = 0.1 """Default value of ``epsilon`` parameter. 
""" class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)): """Base class for SGD classification and regression.""" def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0, l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True, verbose=0, epsilon=0.1, random_state=None, learning_rate="optimal", eta0=0.0, power_t=0.5, warm_start=False, average=False): self.loss = loss self.penalty = penalty self.learning_rate = learning_rate self.epsilon = epsilon self.alpha = alpha self.C = C self.l1_ratio = l1_ratio self.fit_intercept = fit_intercept self.n_iter = n_iter self.shuffle = shuffle self.random_state = random_state self.verbose = verbose self.eta0 = eta0 self.power_t = power_t self.warm_start = warm_start self.average = average self._validate_params() self.coef_ = None if self.average > 0: self.standard_coef_ = None self.average_coef_ = None # iteration count for learning rate schedule # must not be int (e.g. if ``learning_rate=='optimal'``) self.t_ = None def set_params(self, *args, **kwargs): super(BaseSGD, self).set_params(*args, **kwargs) self._validate_params() return self @abstractmethod def fit(self, X, y): """Fit model.""" def _validate_params(self): """Validate input params. """ if not isinstance(self.shuffle, bool): raise ValueError("shuffle must be either True or False") if self.n_iter <= 0: raise ValueError("n_iter must be > zero") if not (0.0 <= self.l1_ratio <= 1.0): raise ValueError("l1_ratio must be in [0, 1]") if self.alpha < 0.0: raise ValueError("alpha must be >= 0") if self.learning_rate in ("constant", "invscaling"): if self.eta0 <= 0.0: raise ValueError("eta0 must be > 0") # raises ValueError if not registered self._get_penalty_type(self.penalty) self._get_learning_rate_type(self.learning_rate) if self.loss not in self.loss_functions: raise ValueError("The loss %s is not supported. " % self.loss) def _get_loss_function(self, loss): """Get concrete ``LossFunction`` object for str ``loss``. 
""" try: loss_ = self.loss_functions[loss] loss_class, args = loss_[0], loss_[1:] if loss in ('huber', 'epsilon_insensitive', 'squared_epsilon_insensitive'): args = (self.epsilon, ) return loss_class(*args) except KeyError: raise ValueError("The loss %s is not supported. " % loss) def _get_learning_rate_type(self, learning_rate): try: return LEARNING_RATE_TYPES[learning_rate] except KeyError: raise ValueError("learning rate %s " "is not supported. " % learning_rate) def _get_penalty_type(self, penalty): penalty = str(penalty).lower() try: return PENALTY_TYPES[penalty] except KeyError: raise ValueError("Penalty %s is not supported. " % penalty) def _validate_sample_weight(self, sample_weight, n_samples): """Set the sample weight array.""" if sample_weight is None: # uniform sample weights sample_weight = np.ones(n_samples, dtype=np.float64, order='C') else: # user-provided array sample_weight = np.asarray(sample_weight, dtype=np.float64, order="C") if sample_weight.shape[0] != n_samples: raise ValueError("Shapes of X and sample_weight do not match.") return sample_weight def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None, intercept_init=None): """Allocate mem for parameters; initialize if provided.""" if n_classes > 2: # allocate coef_ for multi-class if coef_init is not None: coef_init = np.asarray(coef_init, order="C") if coef_init.shape != (n_classes, n_features): raise ValueError("Provided ``coef_`` does not match dataset. 
") self.coef_ = coef_init else: self.coef_ = np.zeros((n_classes, n_features), dtype=np.float64, order="C") # allocate intercept_ for multi-class if intercept_init is not None: intercept_init = np.asarray(intercept_init, order="C") if intercept_init.shape != (n_classes, ): raise ValueError("Provided intercept_init " "does not match dataset.") self.intercept_ = intercept_init else: self.intercept_ = np.zeros(n_classes, dtype=np.float64, order="C") else: # allocate coef_ for binary problem if coef_init is not None: coef_init = np.asarray(coef_init, dtype=np.float64, order="C") coef_init = coef_init.ravel() if coef_init.shape != (n_features,): raise ValueError("Provided coef_init does not " "match dataset.") self.coef_ = coef_init else: self.coef_ = np.zeros(n_features, dtype=np.float64, order="C") # allocate intercept_ for binary problem if intercept_init is not None: intercept_init = np.asarray(intercept_init, dtype=np.float64) if intercept_init.shape != (1,) and intercept_init.shape != (): raise ValueError("Provided intercept_init " "does not match dataset.") self.intercept_ = intercept_init.reshape(1,) else: self.intercept_ = np.zeros(1, dtype=np.float64, order="C") # initialize average parameters if self.average > 0: self.standard_coef_ = self.coef_ self.standard_intercept_ = self.intercept_ self.average_coef_ = np.zeros(self.coef_.shape, dtype=np.float64, order="C") self.average_intercept_ = np.zeros(self.standard_intercept_.shape, dtype=np.float64, order="C") def _make_dataset(X, y_i, sample_weight): """Create ``Dataset`` abstraction for sparse and dense inputs. This also returns the ``intercept_decay`` which is different for sparse datasets. """ if sp.issparse(X): dataset = CSRDataset(X.data, X.indptr, X.indices, y_i, sample_weight) intercept_decay = SPARSE_INTERCEPT_DECAY else: dataset = ArrayDataset(X, y_i, sample_weight) intercept_decay = 1.0 return dataset, intercept_decay def _prepare_fit_binary(est, y, i): """Initialization for fit_binary. 
    Returns y, coef, intercept.

    Builds the +/-1 target for the one-vs-all subproblem ``i`` and selects
    the coefficient/intercept views that the Cython solver will update in
    place: the flat binary arrays when there are exactly two classes,
    otherwise row ``i`` of the multi-class arrays; the ``standard_*`` /
    ``average_*`` variants when averaging is enabled.
    """
    # Encode the target as +1 for class ``i`` and -1 for everything else.
    y_i = np.ones(y.shape, dtype=np.float64, order="C")
    y_i[y != est.classes_[i]] = -1.0
    average_intercept = 0
    average_coef = None

    if len(est.classes_) == 2:
        if not est.average:
            coef = est.coef_.ravel()
            intercept = est.intercept_[0]
        else:
            coef = est.standard_coef_.ravel()
            intercept = est.standard_intercept_[0]
            average_coef = est.average_coef_.ravel()
            average_intercept = est.average_intercept_[0]
    else:
        if not est.average:
            coef = est.coef_[i]
            intercept = est.intercept_[i]
        else:
            coef = est.standard_coef_[i]
            intercept = est.standard_intercept_[i]
            average_coef = est.average_coef_[i]
            average_intercept = est.average_intercept_[i]

    return y_i, coef, intercept, average_coef, average_intercept


def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
               pos_weight, neg_weight, sample_weight):
    """Fit a single binary classifier.

    The i'th class is considered the "positive" class.

    Delegates to the Cython routines ``plain_sgd`` (no averaging) or
    ``average_sgd`` (averaging), and returns the fitted
    ``(coef, intercept)`` pair for subproblem ``i``. In the averaged case
    the averaged intercept is additionally written back into
    ``est.average_intercept_`` as a side effect.

    Parameters
    ----------
    est : estimator carrying the SGD hyper-parameters and ``classes_``.
    i : index of the class treated as positive.
    X, y : training data and (un-encoded) targets.
    alpha, C, learning_rate, n_iter : optimization settings for this run.
    pos_weight, neg_weight : class weights for the +1 / -1 encoded labels.
    sample_weight : per-sample weights, aligned with ``y``.
    """
    # if average is not true, average_coef, and average_intercept will be
    # unused
    y_i, coef, intercept, average_coef, average_intercept = \
        _prepare_fit_binary(est, y, i)
    assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
    dataset, intercept_decay = _make_dataset(X, y_i, sample_weight)

    # Translate string hyper-parameters into the integer codes the Cython
    # solvers expect.
    penalty_type = est._get_penalty_type(est.penalty)
    learning_rate_type = est._get_learning_rate_type(learning_rate)

    # XXX should have random_state_!
    random_state = check_random_state(est.random_state)
    # numpy mtrand expects a C long which is a signed 32 bit integer under
    # Windows
    seed = random_state.randint(0, np.iinfo(np.int32).max)

    if not est.average:
        return plain_sgd(coef, intercept, est.loss_function,
                         penalty_type, alpha, C, est.l1_ratio,
                         dataset, n_iter, int(est.fit_intercept),
                         int(est.verbose), int(est.shuffle), seed,
                         pos_weight, neg_weight,
                         learning_rate_type, est.eta0,
                         est.power_t, est.t_, intercept_decay)

    else:
        standard_coef, standard_intercept, average_coef, \
            average_intercept = average_sgd(coef, intercept, average_coef,
                                            average_intercept,
                                            est.loss_function, penalty_type,
                                            alpha, C, est.l1_ratio, dataset,
                                            n_iter, int(est.fit_intercept),
                                            int(est.verbose),
                                            int(est.shuffle), seed,
                                            pos_weight, neg_weight,
                                            learning_rate_type, est.eta0,
                                            est.power_t, est.t_,
                                            intercept_decay,
                                            est.average)

        # Persist the averaged intercept on the estimator; the averaged
        # coefficients live in ``est.average_coef_`` which ``average_sgd``
        # updated in place through the views selected above.
        if len(est.classes_) == 2:
            est.average_intercept_[0] = average_intercept
        else:
            est.average_intercept_[i] = average_intercept

        return standard_coef, standard_intercept


class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
                                           LinearClassifierMixin)):

    # Maps each supported loss name to ``(loss_class, *constructor_args)``;
    # consumed by ``BaseSGD._get_loss_function``.
    loss_functions = {
        "hinge": (Hinge, 1.0),
        "squared_hinge": (SquaredHinge, 1.0),
        "perceptron": (Hinge, 0.0),
        "log": (Log, ),
        "modified_huber": (ModifiedHuber, ),
        "squared_loss": (SquaredLoss, ),
        "huber": (Huber, DEFAULT_EPSILON),
        "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
        "squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
                                        DEFAULT_EPSILON),
    }

    @abstractmethod
    def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
                 fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
                 epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
                 learning_rate="optimal", eta0=0.0, power_t=0.5,
                 class_weight=None, warm_start=False, average=False):

        super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
                                                alpha=alpha, l1_ratio=l1_ratio,
                                                fit_intercept=fit_intercept,
                                                n_iter=n_iter, shuffle=shuffle,
                                                verbose=verbose,
                                                epsilon=epsilon,
random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, warm_start=warm_start, average=average) self.class_weight = class_weight self.classes_ = None self.n_jobs = int(n_jobs) def _partial_fit(self, X, y, alpha, C, loss, learning_rate, n_iter, classes, sample_weight, coef_init, intercept_init): X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C") n_samples, n_features = X.shape self._validate_params() _check_partial_fit_first_call(self, classes) n_classes = self.classes_.shape[0] # Allocate datastructures from input arguments self._expanded_class_weight = compute_class_weight(self.class_weight, self.classes_, y) sample_weight = self._validate_sample_weight(sample_weight, n_samples) if self.coef_ is None or coef_init is not None: self._allocate_parameter_mem(n_classes, n_features, coef_init, intercept_init) elif n_features != self.coef_.shape[-1]: raise ValueError("Number of features %d does not match previous data %d." % (n_features, self.coef_.shape[-1])) self.loss_function = self._get_loss_function(loss) if self.t_ is None: self.t_ = 1.0 # delegate to concrete training procedure if n_classes > 2: self._fit_multiclass(X, y, alpha=alpha, C=C, learning_rate=learning_rate, sample_weight=sample_weight, n_iter=n_iter) elif n_classes == 2: self._fit_binary(X, y, alpha=alpha, C=C, learning_rate=learning_rate, sample_weight=sample_weight, n_iter=n_iter) else: raise ValueError("The number of class labels must be " "greater than one.") return self def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None, intercept_init=None, sample_weight=None): if hasattr(self, "classes_"): self.classes_ = None X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C") n_samples, n_features = X.shape # labels can be encoded as float, int, or string literals # np.unique sorts in asc order; largest class id is positive class classes = np.unique(y) if self.warm_start and self.coef_ is not None: if coef_init is None: coef_init = self.coef_ if 
intercept_init is None: intercept_init = self.intercept_ else: self.coef_ = None self.intercept_ = None if self.average > 0: self.standard_coef_ = self.coef_ self.standard_intercept_ = self.intercept_ self.average_coef_ = None self.average_intercept_ = None # Clear iteration count for multiple call to fit. self.t_ = None self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter, classes, sample_weight, coef_init, intercept_init) return self def _fit_binary(self, X, y, alpha, C, sample_weight, learning_rate, n_iter): """Fit a binary classifier on X and y. """ coef, intercept = fit_binary(self, 1, X, y, alpha, C, learning_rate, n_iter, self._expanded_class_weight[1], self._expanded_class_weight[0], sample_weight) self.t_ += n_iter * X.shape[0] # need to be 2d if self.average > 0: if self.average <= self.t_ - 1: self.coef_ = self.average_coef_.reshape(1, -1) self.intercept_ = self.average_intercept_ else: self.coef_ = self.standard_coef_.reshape(1, -1) self.standard_intercept_ = np.atleast_1d(intercept) self.intercept_ = self.standard_intercept_ else: self.coef_ = coef.reshape(1, -1) # intercept is a float, need to convert it to an array of length 1 self.intercept_ = np.atleast_1d(intercept) def _fit_multiclass(self, X, y, alpha, C, learning_rate, sample_weight, n_iter): """Fit a multi-class classifier by combining binary classifiers Each binary classifier predicts one class versus all others. This strategy is called OVA: One Versus All. """ # Use joblib to fit OvA in parallel. 
result = Parallel(n_jobs=self.n_jobs, backend="threading", verbose=self.verbose)( delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate, n_iter, self._expanded_class_weight[i], 1., sample_weight) for i in range(len(self.classes_))) for i, (_, intercept) in enumerate(result): self.intercept_[i] = intercept self.t_ += n_iter * X.shape[0] if self.average > 0: if self.average <= self.t_ - 1.0: self.coef_ = self.average_coef_ self.intercept_ = self.average_intercept_ else: self.coef_ = self.standard_coef_ self.standard_intercept_ = np.atleast_1d(intercept) self.intercept_ = self.standard_intercept_ def partial_fit(self, X, y, classes=None, sample_weight=None): """Fit linear model with Stochastic Gradient Descent. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Subset of the training data y : numpy array, shape (n_samples,) Subset of the target values classes : array, shape (n_classes,) Classes across all calls to partial_fit. Can be obtained by via `np.unique(y_all)`, where y_all is the target vector of the entire dataset. This argument is required for the first call to partial_fit and can be omitted in the subsequent calls. Note that y doesn't need to contain all labels in `classes`. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples. If not provided, uniform weights are assumed. Returns ------- self : returns an instance of self. """ if self.class_weight == 'auto': raise ValueError("class_weight 'auto' is not supported for " "partial_fit. In order to use 'auto' weights, " "use compute_class_weight('auto', classes, y). " "In place of y you can us a large enough sample " "of the full training set target to properly " "estimate the class frequency distributions. 
" "Pass the resulting weights as the class_weight " "parameter.") return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, n_iter=1, classes=classes, sample_weight=sample_weight, coef_init=None, intercept_init=None) def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None): """Fit linear model with Stochastic Gradient Descent. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data y : numpy array, shape (n_samples,) Target values coef_init : array, shape (n_classes, n_features) The initial coefficients to warm-start the optimization. intercept_init : array, shape (n_classes,) The initial intercept to warm-start the optimization. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples. If not provided, uniform weights are assumed. These weights will be multiplied with class_weight (passed through the contructor) if class_weight is specified Returns ------- self : returns an instance of self. """ return self._fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, coef_init=coef_init, intercept_init=intercept_init, sample_weight=sample_weight) class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin): """Linear classifiers (SVM, logistic regression, a.o.) with SGD training. This estimator implements regularized linear models with stochastic gradient descent (SGD) learning: the gradient of the loss is estimated each sample at a time and the model is updated along the way with a decreasing strength schedule (aka learning rate). SGD allows minibatch (online/out-of-core) learning, see the partial_fit method. For best results using the default learning rate schedule, the data should have zero mean and unit variance. This implementation works with data represented as dense or sparse arrays of floating point values for the features. 
The model it fits can be controlled with the loss parameter; by default, it fits a linear support vector machine (SVM). The regularizer is a penalty added to the loss function that shrinks model parameters towards the zero vector using either the squared euclidean norm L2 or the absolute norm L1 or a combination of both (Elastic Net). If the parameter update crosses the 0.0 value because of the regularizer, the update is truncated to 0.0 to allow for learning sparse models and achieve online feature selection. Parameters ---------- loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\ 'perceptron', or a regression loss: 'squared_loss', 'huber',\ 'epsilon_insensitive', or 'squared_epsilon_insensitive' The loss function to be used. Defaults to 'hinge', which gives a linear SVM. The 'log' loss gives logistic regression, a probabilistic classifier. 'modified_huber' is another smooth loss that brings tolerance to outliers as well as probability estimates. 'squared_hinge' is like hinge but is quadratically penalized. 'perceptron' is the linear loss used by the perceptron algorithm. The other losses are designed for regression but can be useful in classification as well; see SGDRegressor for a description. penalty : str, 'none', 'l2', 'l1', or 'elasticnet' The penalty (aka regularization term) to be used. Defaults to 'l2' which is the standard regularizer for linear SVM models. 'l1' and 'elasticnet' might bring sparsity to the model (feature selection) not achievable with 'l2'. alpha : float Constant that multiplies the regularization term. Defaults to 0.0001 l1_ratio : float The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1. l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1. Defaults to 0.15. fit_intercept : bool Whether the intercept should be estimated or not. If False, the data is assumed to be already centered. Defaults to True. n_iter : int, optional The number of passes over the training data (aka epochs). 
The number of iterations is set to 1 if using partial_fit. Defaults to 5. shuffle : bool, optional Whether or not the training data should be shuffled after each epoch. Defaults to True. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. verbose : integer, optional The verbosity level epsilon : float Epsilon in the epsilon-insensitive loss functions; only if `loss` is 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'. For 'huber', determines the threshold at which it becomes less important to get the prediction exactly right. For epsilon-insensitive, any differences between the current prediction and the correct label are ignored if they are less than this threshold. n_jobs : integer, optional The number of CPUs to use to do the OVA (One Versus All, for multi-class problems) computation. -1 means 'all CPUs'. Defaults to 1. learning_rate : string, optional The learning rate schedule: constant: eta = eta0 optimal: eta = 1.0 / (t + t0) [default] invscaling: eta = eta0 / pow(t, power_t) where t0 is chosen by a heuristic proposed by Leon Bottou. eta0 : double The initial learning rate for the 'constant' or 'invscaling' schedules. The default value is 0.0 as eta0 is not used by the default schedule 'optimal'. power_t : double The exponent for inverse scaling learning rate [default 0.5]. class_weight : dict, {class_label: weight} or "auto" or None, optional Preset for the class_weight fit parameter. Weights associated with classes. If not given, all classes are supposed to have weight one. The "auto" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies. warm_start : bool, optional When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. 
average : bool or int, optional When set to True, computes the averaged SGD weights and stores the result in the ``coef_`` attribute. If set to an int greater than 1, averaging will begin once the total number of samples seen reaches average. So average=10 will begin averaging after seeing 10 samples. Attributes ---------- coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\ n_features) Weights assigned to the features. intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,) Constants in decision function. Examples -------- >>> import numpy as np >>> from sklearn import linear_model >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) >>> Y = np.array([1, 1, 2, 2]) >>> clf = linear_model.SGDClassifier() >>> clf.fit(X, Y) ... #doctest: +NORMALIZE_WHITESPACE SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1, eta0=0.0, fit_intercept=True, l1_ratio=0.15, learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1, penalty='l2', power_t=0.5, random_state=None, shuffle=True, verbose=0, warm_start=False) >>> print(clf.predict([[-0.8, -1]])) [1] See also -------- LinearSVC, LogisticRegression, Perceptron """ def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None, learning_rate="optimal", eta0=0.0, power_t=0.5, class_weight=None, warm_start=False, average=False): super(SGDClassifier, self).__init__( loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle, verbose=verbose, epsilon=epsilon, n_jobs=n_jobs, random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, class_weight=class_weight, warm_start=warm_start, average=average) def _check_proba(self): check_is_fitted(self, "t_") if self.loss not in ("log", "modified_huber"): raise AttributeError("probability estimates are not available for" " loss=%r" % 
self.loss) @property def predict_proba(self): """Probability estimates. This method is only available for log loss and modified Huber loss. Multiclass probability estimates are derived from binary (one-vs.-rest) estimates by simple normalization, as recommended by Zadrozny and Elkan. Binary probability estimates for loss="modified_huber" are given by (clip(decision_function(X), -1, 1) + 1) / 2. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Returns ------- array, shape (n_samples, n_classes) Returns the probability of the sample for each class in the model, where classes are ordered as they are in `self.classes_`. References ---------- Zadrozny and Elkan, "Transforming classifier scores into multiclass probability estimates", SIGKDD'02, http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf The justification for the formula in the loss="modified_huber" case is in the appendix B in: http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf """ self._check_proba() return self._predict_proba def _predict_proba(self, X): if self.loss == "log": return self._predict_proba_lr(X) elif self.loss == "modified_huber": binary = (len(self.classes_) == 2) scores = self.decision_function(X) if binary: prob2 = np.ones((scores.shape[0], 2)) prob = prob2[:, 1] else: prob = scores np.clip(scores, -1, 1, prob) prob += 1. prob /= 2. if binary: prob2[:, 0] -= prob prob = prob2 else: # the above might assign zero to all classes, which doesn't # normalize neatly; work around this to produce uniform # probabilities prob_sum = prob.sum(axis=1) all_zero = (prob_sum == 0) if np.any(all_zero): prob[all_zero, :] = 1 prob_sum[all_zero] = len(self.classes_) # normalize prob /= prob_sum.reshape((prob.shape[0], -1)) return prob else: raise NotImplementedError("predict_(log_)proba only supported when" " loss='log' or loss='modified_huber' " "(%r given)" % self.loss) @property def predict_log_proba(self): """Log of probability estimates. 
This method is only available for log loss and modified Huber loss. When loss="modified_huber", probability estimates may be hard zeros and ones, so taking the logarithm is not possible. See ``predict_proba`` for details. Parameters ---------- X : array-like, shape (n_samples, n_features) Returns ------- T : array-like, shape (n_samples, n_classes) Returns the log-probability of the sample for each class in the model, where classes are ordered as they are in `self.classes_`. """ self._check_proba() return self._predict_log_proba def _predict_log_proba(self, X): return np.log(self.predict_proba(X)) class BaseSGDRegressor(BaseSGD, RegressorMixin): loss_functions = { "squared_loss": (SquaredLoss, ), "huber": (Huber, DEFAULT_EPSILON), "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON), "squared_epsilon_insensitive": (SquaredEpsilonInsensitive, DEFAULT_EPSILON), } @abstractmethod def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001, l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, random_state=None, learning_rate="invscaling", eta0=0.01, power_t=0.25, warm_start=False, average=False): super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle, verbose=verbose, epsilon=epsilon, random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, warm_start=warm_start, average=average) def _partial_fit(self, X, y, alpha, C, loss, learning_rate, n_iter, sample_weight, coef_init, intercept_init): X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64) y = y.astype(np.float64) n_samples, n_features = X.shape self._validate_params() # Allocate datastructures from input arguments sample_weight = self._validate_sample_weight(sample_weight, n_samples) if self.coef_ is None: self._allocate_parameter_mem(1, n_features, coef_init, intercept_init) elif n_features != 
self.coef_.shape[-1]: raise ValueError("Number of features %d does not match previous data %d." % (n_features, self.coef_.shape[-1])) if self.average > 0 and self.average_coef_ is None: self.average_coef_ = np.zeros(n_features, dtype=np.float64, order="C") self.average_intercept_ = np.zeros(1, dtype=np.float64, order="C") self._fit_regressor(X, y, alpha, C, loss, learning_rate, sample_weight, n_iter) return self def partial_fit(self, X, y, sample_weight=None): """Fit linear model with Stochastic Gradient Descent. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Subset of training data y : numpy array of shape (n_samples,) Subset of target values sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples. If not provided, uniform weights are assumed. Returns ------- self : returns an instance of self. """ return self._partial_fit(X, y, self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, n_iter=1, sample_weight=sample_weight, coef_init=None, intercept_init=None) def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None, intercept_init=None, sample_weight=None): if self.warm_start and self.coef_ is not None: if coef_init is None: coef_init = self.coef_ if intercept_init is None: intercept_init = self.intercept_ else: self.coef_ = None self.intercept_ = None if self.average > 0: self.standard_intercept_ = self.intercept_ self.standard_coef_ = self.coef_ self.average_coef_ = None self.average_intercept_ = None # Clear iteration count for multiple call to fit. self.t_ = None return self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter, sample_weight, coef_init, intercept_init) def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None): """Fit linear model with Stochastic Gradient Descent. 
Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data y : numpy array, shape (n_samples,) Target values coef_init : array, shape (n_features,) The initial coefficients to warm-start the optimization. intercept_init : array, shape (1,) The initial intercept to warm-start the optimization. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples (1. for unweighted). Returns ------- self : returns an instance of self. """ return self._fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, coef_init=coef_init, intercept_init=intercept_init, sample_weight=sample_weight) @deprecated(" and will be removed in 0.19.") def decision_function(self, X): """Predict using the linear model Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Returns ------- array, shape (n_samples,) Predicted target values per element in X. """ check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all) X = check_array(X, accept_sparse='csr') scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ return scores.ravel() def predict(self, X): """Predict using the linear model Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Returns ------- array, shape (n_samples,) Predicted target values per element in X. 
""" return self.decision_function(X) def _fit_regressor(self, X, y, alpha, C, loss, learning_rate, sample_weight, n_iter): dataset, intercept_decay = _make_dataset(X, y, sample_weight) loss_function = self._get_loss_function(loss) penalty_type = self._get_penalty_type(self.penalty) learning_rate_type = self._get_learning_rate_type(learning_rate) if self.t_ is None: self.t_ = 1.0 random_state = check_random_state(self.random_state) # numpy mtrand expects a C long which is a signed 32 bit integer under # Windows seed = random_state.randint(0, np.iinfo(np.int32).max) if self.average > 0: self.standard_coef_, self.standard_intercept_, \ self.average_coef_, self.average_intercept_ =\ average_sgd(self.standard_coef_, self.standard_intercept_[0], self.average_coef_, self.average_intercept_[0], loss_function, penalty_type, alpha, C, self.l1_ratio, dataset, n_iter, int(self.fit_intercept), int(self.verbose), int(self.shuffle), seed, 1.0, 1.0, learning_rate_type, self.eta0, self.power_t, self.t_, intercept_decay, self.average) self.average_intercept_ = np.atleast_1d(self.average_intercept_) self.standard_intercept_ = np.atleast_1d(self.standard_intercept_) self.t_ += n_iter * X.shape[0] if self.average <= self.t_ - 1.0: self.coef_ = self.average_coef_ self.intercept_ = self.average_intercept_ else: self.coef_ = self.standard_coef_ self.intercept_ = self.standard_intercept_ else: self.coef_, self.intercept_ = \ plain_sgd(self.coef_, self.intercept_[0], loss_function, penalty_type, alpha, C, self.l1_ratio, dataset, n_iter, int(self.fit_intercept), int(self.verbose), int(self.shuffle), seed, 1.0, 1.0, learning_rate_type, self.eta0, self.power_t, self.t_, intercept_decay) self.t_ += n_iter * X.shape[0] self.intercept_ = np.atleast_1d(self.intercept_) class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin): """Linear model fitted by minimizing a regularized empirical loss with SGD SGD stands for Stochastic Gradient Descent: the gradient of the loss is estimated each sample at 
a time and the model is updated along the way with a decreasing strength schedule (aka learning rate). The regularizer is a penalty added to the loss function that shrinks model parameters towards the zero vector using either the squared euclidean norm L2 or the absolute norm L1 or a combination of both (Elastic Net). If the parameter update crosses the 0.0 value because of the regularizer, the update is truncated to 0.0 to allow for learning sparse models and achieve online feature selection. This implementation works with data represented as dense numpy arrays of floating point values for the features. Parameters ---------- loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \ or 'squared_epsilon_insensitive' The loss function to be used. Defaults to 'squared_loss' which refers to the ordinary least squares fit. 'huber' modifies 'squared_loss' to focus less on getting outliers correct by switching from squared to linear loss past a distance of epsilon. 'epsilon_insensitive' ignores errors less than epsilon and is linear past that; this is the loss function used in SVR. 'squared_epsilon_insensitive' is the same but becomes squared loss past a tolerance of epsilon. penalty : str, 'none', 'l2', 'l1', or 'elasticnet' The penalty (aka regularization term) to be used. Defaults to 'l2' which is the standard regularizer for linear SVM models. 'l1' and 'elasticnet' might bring sparsity to the model (feature selection) not achievable with 'l2'. alpha : float Constant that multiplies the regularization term. Defaults to 0.0001 l1_ratio : float The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1. l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1. Defaults to 0.15. fit_intercept : bool Whether the intercept should be estimated or not. If False, the data is assumed to be already centered. Defaults to True. n_iter : int, optional The number of passes over the training data (aka epochs). The number of iterations is set to 1 if using partial_fit. Defaults to 5. 
shuffle : bool, optional Whether or not the training data should be shuffled after each epoch. Defaults to True. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. verbose : integer, optional The verbosity level. epsilon : float Epsilon in the epsilon-insensitive loss functions; only if `loss` is 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'. For 'huber', determines the threshold at which it becomes less important to get the prediction exactly right. For epsilon-insensitive, any differences between the current prediction and the correct label are ignored if they are less than this threshold. learning_rate : string, optional The learning rate: constant: eta = eta0 optimal: eta = 1.0/(alpha * t) invscaling: eta = eta0 / pow(t, power_t) [default] eta0 : double, optional The initial learning rate [default 0.01]. power_t : double, optional The exponent for inverse scaling learning rate [default 0.25]. warm_start : bool, optional When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. average : bool or int, optional When set to True, computes the averaged SGD weights and stores the result in the ``coef_`` attribute. If set to an int greater than 1, averaging will begin once the total number of samples seen reaches average. So ``average=10 will`` begin averaging after seeing 10 samples. Attributes ---------- coef_ : array, shape (n_features,) Weights assigned to the features. intercept_ : array, shape (1,) The intercept term. `average_coef_` : array, shape (n_features,) Averaged weights assigned to the features. `average_intercept_` : array, shape (1,) The averaged intercept term. 
Examples -------- >>> import numpy as np >>> from sklearn import linear_model >>> n_samples, n_features = 10, 5 >>> np.random.seed(0) >>> y = np.random.randn(n_samples) >>> X = np.random.randn(n_samples, n_features) >>> clf = linear_model.SGDRegressor() >>> clf.fit(X, y) ... #doctest: +NORMALIZE_WHITESPACE SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01, fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling', loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25, random_state=None, shuffle=True, verbose=0, warm_start=False) See also -------- Ridge, ElasticNet, Lasso, SVR """ def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001, l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, random_state=None, learning_rate="invscaling", eta0=0.01, power_t=0.25, warm_start=False, average=False): super(SGDRegressor, self).__init__(loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle, verbose=verbose, epsilon=epsilon, random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, warm_start=warm_start, average=average)
lsjostro/pulp_win
refs/heads/master
extensions_admin/test/unit/extensions/admin/upload/test_package.py
2
import os import shutil import tempfile import mock from pulp.bindings.responses import Task from ....testbase import PulpClientTests from pulp_win.extensions.admin.upload import package from pulp_win.common.ids import TYPE_ID_MSI, TYPE_ID_MSM class CreatePackageCommandTests(PulpClientTests): def setUp(self): super(CreatePackageCommandTests, self).setUp() self.upload_manager = mock.MagicMock() self.command = package._CreatePackageCommand( self.context, self.upload_manager) self.work_dir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.work_dir, ignore_errors=True) super(CreatePackageCommandTests, self).tearDown() def test_matching_files_in_dir(self): self.command.suffix = '.something' open(os.path.join(self.work_dir, 'foo.something'), "w") open(os.path.join(self.work_dir, 'foo.somethingelse'), "w") unit_files = self.command.matching_files_in_dir(self.work_dir) self.assertEqual(['foo.something'], [os.path.basename(x) for x in unit_files]) def test_succeeded(self): self.command.prompt = mock.Mock() task = Task({}) self.command.succeeded(task) self.assertTrue(self.command.prompt.render_success_message.called) def test_succeeded_error_in_result(self): self.command.prompt = mock.Mock() task = Task({'result': {'details': {'errors': ['foo']}}}) self.command.succeeded(task) self.assertTrue(self.command.prompt.render_failure_message.called) class CreateMsiCommandTests(PulpClientTests): def setUp(self): super(CreateMsiCommandTests, self).setUp() self.upload_manager = mock.MagicMock() self.command = package.CreateMsiCommand( self.context, self.upload_manager) self.work_dir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.work_dir, ignore_errors=True) super(CreateMsiCommandTests, self).tearDown() def test_structure(self): self.assertTrue(isinstance(self.command, package._CreatePackageCommand)) self.assertEqual(self.command.name, package.NAME_MSI) self.assertEqual(self.command.description, package.DESC_MSI) self.assertEqual(self.command.suffix, 
package.SUFFIX_MSI) self.assertEqual(self.command.type_id, TYPE_ID_MSI) def test_generate_unit_key_and_metadata(self): unit_key, metadata = self.command.generate_unit_key_and_metadata( __file__) self.assertEqual({}, unit_key) self.assertEqual({}, metadata) class CreateMsmCommandTests(PulpClientTests): def setUp(self): super(CreateMsmCommandTests, self).setUp() self.upload_manager = mock.MagicMock() self.command = package.CreateMsmCommand( self.context, self.upload_manager) self.work_dir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.work_dir, ignore_errors=True) super(CreateMsmCommandTests, self).tearDown() def test_structure(self): self.assertTrue(isinstance(self.command, package._CreatePackageCommand)) self.assertEqual(self.command.name, package.NAME_MSM) self.assertEqual(self.command.description, package.DESC_MSM) self.assertEqual(self.command.suffix, package.SUFFIX_MSM) self.assertEqual(self.command.type_id, TYPE_ID_MSM) def test_generate_unit_key_and_metadata(self): unit_key, metadata = self.command.generate_unit_key_and_metadata( __file__) self.assertEqual(unit_key, {}) self.assertEqual(metadata, {})
ganeshnalawade/ansible-modules-core
refs/heads/devel
network/nxos/nxos_hsrp.py
8
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # DOCUMENTATION = ''' --- module: nxos_hsrp version_added: "2.2" short_description: Manages HSRP configuration on NX-OS switches. description: - Manages HSRP configuration on NX-OS switches. extends_documentation_fragment: nxos author: - Jason Edelman (@jedelman8) - Gabriele Gerbino (@GGabriele) notes: - HSRP feature needs to be enabled first on the system. - SVIs must exist before using this module. - Interface must be a L3 port before using this module. - HSRP cannot be configured on loopback interfaces. - MD5 authentication is only possible with HSRPv2 while it is ignored if HSRPv1 is used instead, while it will not raise any error. Here we allow MD5 authentication only with HSRPv2 in order to enforce better practice. options: group: description: - HSRP group number. required: true interface: description: - Full name of interface that is being managed for HSRP. required: true version: description: - HSRP version. required: false default: 2 choices: ['1','2'] priority: description: - HSRP priority. required: false default: null vip: description: - HSRP virtual IP address. required: false default: null auth_string: description: - Authentication string. required: false default: null auth_type: description: - Authentication type. 
required: false default: null choices: ['text','md5'] state: description: - Specify desired state of the resource. required: false choices: ['present','absent'] default: 'present' ''' EXAMPLES = ''' - name: Ensure HSRP is configured with following params on a SVI nxos_hsrp: group: 10 vip: 10.1.1.1 priority: 150 interface: vlan10 preempt: enabled host: 68.170.147.165 - name: Ensure HSRP is configured with following params on a SVI nxos_hsrp: group: 10 vip: 10.1.1.1 priority: 150 interface: vlan10 preempt: enabled host: 68.170.147.165 auth_type: text auth_string: CISCO - name: Remove HSRP config for given interface, group, and VIP nxos_hsrp: group: 10 interface: vlan10 vip: 10.1.1.1 host: 68.170.147.165 state: absent ''' RETURN = ''' proposed: description: k/v pairs of parameters passed into module returned: always type: dict sample: {"group": "30", "version": "2", "vip": "10.30.1.1"} existing: description: k/v pairs of existing hsrp info on the interface type: dict sample: {} end_state: description: k/v pairs of hsrp after module execution returned: always type: dict sample: {"auth_string": "cisco", "auth_type": "text", "group": "30", "interface": "vlan10", "preempt": "disabled", "priority": "100", "version": "2", "vip": "10.30.1.1"} updates: description: commands sent to the device returned: always type: list sample: ["interface vlan10", "hsrp version 2", "hsrp 30", "ip 10.30.1.1"] changed: description: check to see if a change was made on the device returned: always type: boolean sample: true ''' import json # COMMON CODE FOR MIGRATION import ansible.module_utils.nxos from ansible.module_utils.basic import get_exception from ansible.module_utils.netcfg import NetworkConfig, ConfigLine from ansible.module_utils.shell import ShellError from ansible.module_utils.network import NetworkModule def to_list(val): if isinstance(val, (list, tuple)): return list(val) elif val is not None: return [val] else: return list() class CustomNetworkConfig(NetworkConfig): def 
expand_section(self, configobj, S=None): if S is None: S = list() S.append(configobj) for child in configobj.children: if child in S: continue self.expand_section(child, S) return S def get_object(self, path): for item in self.items: if item.text == path[-1]: parents = [p.text for p in item.parents] if parents == path[:-1]: return item def to_block(self, section): return '\n'.join([item.raw for item in section]) def get_section(self, path): try: section = self.get_section_objects(path) return self.to_block(section) except ValueError: return list() def get_section_objects(self, path): if not isinstance(path, list): path = [path] obj = self.get_object(path) if not obj: raise ValueError('path does not exist in config') return self.expand_section(obj) def add(self, lines, parents=None): """Adds one or lines of configuration """ ancestors = list() offset = 0 obj = None ## global config command if not parents: for line in to_list(lines): item = ConfigLine(line) item.raw = line if item not in self.items: self.items.append(item) else: for index, p in enumerate(parents): try: i = index + 1 obj = self.get_section_objects(parents[:i])[0] ancestors.append(obj) except ValueError: # add parent to config offset = index * self.indent obj = ConfigLine(p) obj.raw = p.rjust(len(p) + offset) if ancestors: obj.parents = list(ancestors) ancestors[-1].children.append(obj) self.items.append(obj) ancestors.append(obj) # add child objects for line in to_list(lines): # check if child already exists for child in ancestors[-1].children: if child.text == line: break else: offset = len(parents) * self.indent item = ConfigLine(line) item.raw = line.rjust(len(line) + offset) item.parents = ancestors ancestors[-1].children.append(item) self.items.append(item) def get_network_module(**kwargs): try: return get_module(**kwargs) except NameError: return NetworkModule(**kwargs) def get_config(module, include_defaults=False): config = module.params['config'] if not config: try: config = 
module.get_config() except AttributeError: defaults = module.params['include_defaults'] config = module.config.get_config(include_defaults=defaults) return CustomNetworkConfig(indent=2, contents=config) def load_config(module, candidate): config = get_config(module) commands = candidate.difference(config) commands = [str(c).strip() for c in commands] save_config = module.params['save'] result = dict(changed=False) if commands: if not module.check_mode: try: module.configure(commands) except AttributeError: module.config(commands) if save_config: try: module.config.save_config() except AttributeError: module.execute(['copy running-config startup-config']) result['changed'] = True result['updates'] = commands return result # END OF COMMON CODE def execute_config_command(commands, module): try: output = module.configure(commands) except ShellError: clie = get_exception() module.fail_json(msg='Error sending CLI commands', error=str(clie), commands=commands) except AttributeError: try: commands.insert(0, 'configure') module.cli.add_commands(commands, output='config') output = module.cli.run_commands() except ShellError: clie = get_exception() module.fail_json(msg='Error sending CLI commands', error=str(clie), commands=commands) return output def get_cli_body_ssh(command, response, module): """Get response for when transport=cli. This is kind of a hack and mainly needed because these modules were originally written for NX-API. And not every command supports "| json" when using cli/ssh. As such, we assume if | json returns an XML string, it is a valid command, but that the resource doesn't exist yet. Instead, the output will be a raw string when issuing commands containing 'show run'. 
""" if 'xml' in response[0]: body = [] elif 'show run' in command: body = response else: try: response = response[0].replace(command + '\n\n', '').strip() body = [json.loads(response)] except ValueError: module.fail_json(msg='Command does not support JSON output', command=command) return body def execute_show(cmds, module, command_type=None): command_type_map = { 'cli_show': 'json', 'cli_show_ascii': 'text' } try: if command_type: response = module.execute(cmds, command_type=command_type) else: response = module.execute(cmds) except ShellError: clie = get_exception() module.fail_json(msg='Error sending {0}'.format(cmds), error=str(clie)) except AttributeError: try: if command_type: command_type = command_type_map.get(command_type) module.cli.add_commands(cmds, output=command_type) response = module.cli.run_commands() else: module.cli.add_commands(cmds, raw=True) response = module.cli.run_commands() except ShellError: clie = get_exception() module.fail_json(msg='Error sending {0}'.format(cmds), error=str(clie)) return response def execute_show_command(command, module, command_type='cli_show'): if module.params['transport'] == 'cli': command += ' | json' cmds = [command] response = execute_show(cmds, module) body = get_cli_body_ssh(command, response, module) elif module.params['transport'] == 'nxapi': cmds = [command] body = execute_show(cmds, module, command_type=command_type) return body def apply_key_map(key_map, table): new_dict = {} for key, value in table.items(): new_key = key_map.get(key) if new_key: value = table.get(key) if value: new_dict[new_key] = str(value) else: new_dict[new_key] = value return new_dict def get_interface_type(interface): if interface.upper().startswith('ET'): return 'ethernet' elif interface.upper().startswith('VL'): return 'svi' elif interface.upper().startswith('LO'): return 'loopback' elif interface.upper().startswith('MG'): return 'management' elif interface.upper().startswith('MA'): return 'management' elif 
interface.upper().startswith('PO'): return 'portchannel' else: return 'unknown' def get_interface_mode(interface, intf_type, module): command = 'show interface {0}'.format(interface) interface = {} mode = 'unknown' if intf_type in ['ethernet', 'portchannel']: body = execute_show_command(command, module)[0] interface_table = body['TABLE_interface']['ROW_interface'] mode = str(interface_table.get('eth_mode', 'layer3')) if mode == 'access' or mode == 'trunk': mode = 'layer2' elif intf_type == 'svi': mode = 'layer3' return mode def get_hsrp_groups_on_interfaces(device, module): command = 'show hsrp all' body = execute_show_command(command, module) hsrp = {} try: get_data = body[0]['TABLE_grp_detail']['ROW_grp_detail'] except (KeyError, AttributeError): return {} for entry in get_data: interface = str(entry['sh_if_index'].lower()) value = hsrp.get(interface, 'new') if value == 'new': hsrp[interface] = [] group = str(entry['sh_group_num']) hsrp[interface].append(group) return hsrp def get_hsrp_group(group, interface, module): command = 'show hsrp group {0}'.format(group) body = execute_show_command(command, module) hsrp = {} hsrp_key = { 'sh_if_index': 'interface', 'sh_group_num': 'group', 'sh_group_version': 'version', 'sh_cfg_prio': 'priority', 'sh_preempt': 'preempt', 'sh_vip': 'vip', 'sh_authentication_type': 'auth_type', 'sh_authentication_data': 'auth_string' } try: hsrp_table = body[0]['TABLE_grp_detail']['ROW_grp_detail'] except (AttributeError, IndexError, TypeError): return {} if isinstance(hsrp_table, dict): hsrp_table = [hsrp_table] for hsrp_group in hsrp_table: parsed_hsrp = apply_key_map(hsrp_key, hsrp_group) parsed_hsrp['interface'] = parsed_hsrp['interface'].lower() if parsed_hsrp['version'] == 'v1': parsed_hsrp['version'] = '1' elif parsed_hsrp['version'] == 'v2': parsed_hsrp['version'] = '2' if parsed_hsrp['interface'] == interface: return parsed_hsrp return hsrp def get_commands_remove_hsrp(group, interface): commands = [] commands.append('interface 
{0}'.format(interface)) commands.append('no hsrp {0}'.format(group)) return commands def get_commands_config_hsrp(delta, interface, args): commands = [] config_args = { 'group': 'hsrp {group}', 'priority': 'priority {priority}', 'preempt': '{preempt}', 'vip': 'ip {vip}' } preempt = delta.get('preempt', None) group = delta.get('group', None) if preempt: if preempt == 'enabled': delta['preempt'] = 'preempt' elif preempt == 'disabled': delta['preempt'] = 'no preempt' for key, value in delta.iteritems(): command = config_args.get(key, 'DNE').format(**delta) if command and command != 'DNE': if key == 'group': commands.insert(0, command) else: commands.append(command) command = None auth_type = delta.get('auth_type', None) auth_string = delta.get('auth_string', None) if auth_type or auth_string: if not auth_type: auth_type = args['auth_type'] elif not auth_string: auth_string = args['auth_string'] if auth_type == 'md5': command = 'authentication md5 key-string {0}'.format(auth_string) commands.append(command) elif auth_type == 'text': command = 'authentication text {0}'.format(auth_string) commands.append(command) if commands and not group: commands.insert(0, 'hsrp {0}'.format(args['group'])) version = delta.get('version', None) if version: if version == '2': command = 'hsrp version 2' elif version == '1': command = 'hsrp version 1' commands.insert(0, command) commands.insert(0, 'interface {0}'.format(interface)) if commands: if not commands[0].startswith('interface'): commands.insert(0, 'interface {0}'.format(interface)) return commands def is_default(interface, module): command = 'show run interface {0}'.format(interface) try: body = execute_show_command(command, module)[0] if 'invalid' in body.lower(): return 'DNE' else: raw_list = body.split('\n') if raw_list[-1].startswith('interface'): return True else: return False except (KeyError): return 'DNE' def validate_config(body, vip, module): new_body = ''.join(body) if "invalid ip address" in new_body.lower(): 
module.fail_json(msg="Invalid VIP. Possible duplicate IP address.", vip=vip) def validate_params(param, module): value = module.params[param] version = module.params['version'] if param == 'group': try: if (int(value) < 0 or int(value) > 255) and version == '1': raise ValueError elif int(value) < 0 or int(value) > 4095: raise ValueError except ValueError: module.fail_json(msg="Warning! 'group' must be an integer between" " 0 and 255 when version 1 and up to 4095 " "when version 2.", group=value, version=version) elif param == 'priority': try: if (int(value) < 0 or int(value) > 255): raise ValueError except ValueError: module.fail_json(msg="Warning! 'priority' must be an integer " "between 0 and 255", priority=value) def main(): argument_spec = dict( group=dict(required=True, type='str'), interface=dict(required=True), version=dict(choices=['1', '2'], default='2', required=False), priority=dict(type='str', required=False), preempt=dict(type='str', choices=['disabled', 'enabled'], required=False), vip=dict(type='str', required=False), auth_type=dict(choices=['text', 'md5'], required=False), auth_string=dict(type='str', required=False), state=dict(choices=['absent', 'present'], required=False, default='present'), include_defaults=dict(default=True), config=dict(), save=dict(type='bool', default=False) ) module = get_network_module(argument_spec=argument_spec, supports_check_mode=True) interface = module.params['interface'].lower() group = module.params['group'] version = module.params['version'] state = module.params['state'] priority = module.params['priority'] preempt = module.params['preempt'] vip = module.params['vip'] auth_type = module.params['auth_type'] auth_string = module.params['auth_string'] transport = module.params['transport'] if state == 'present' and not vip: module.fail_json(msg='the "vip" param is required when state=present') for param in ['group', 'priority']: if module.params[param] is not None: validate_params(param, module) intf_type = 
get_interface_type(interface) if (intf_type != 'ethernet' and transport == 'cli'): if is_default(interface, module) == 'DNE': module.fail_json(msg='That interface does not exist yet. Create ' 'it first.', interface=interface) if intf_type == 'loopback': module.fail_json(msg="Loopback interfaces don't support HSRP.", interface=interface) mode = get_interface_mode(interface, intf_type, module) if mode == 'layer2': module.fail_json(msg='That interface is a layer2 port.\nMake it ' 'a layer 3 port first.', interface=interface) if auth_type or auth_string: if not (auth_type and auth_string): module.fail_json(msg='When using auth parameters, you need BOTH ' 'auth_type AND auth_string.') args = dict(group=group, version=version, priority=priority, preempt=preempt, vip=vip, auth_type=auth_type, auth_string=auth_string) proposed = dict((k, v) for k, v in args.iteritems() if v is not None) existing = get_hsrp_group(group, interface, module) # This will enforce better practice with md5 and hsrp version. if proposed.get('auth_type', None) == 'md5': if proposed['version'] == '1': module.fail_json(msg="It's recommended to use HSRP v2 " "when auth_type=md5") elif not proposed.get('auth_type', None) and existing: if (proposed['version'] == '1' and existing['auth_type'] == 'md5'): module.fail_json(msg="Existing auth_type is md5. 
It's recommended " "to use HSRP v2 when using md5") changed = False end_state = existing commands = [] if state == 'present': delta = dict( set(proposed.iteritems()).difference(existing.iteritems())) if delta: command = get_commands_config_hsrp(delta, interface, args) commands.extend(command) elif state == 'absent': if existing: command = get_commands_remove_hsrp(group, interface) commands.extend(command) if commands: if module.check_mode: module.exit_json(changed=True, commands=commands) else: body = execute_config_command(commands, module) if transport == 'cli': validate_config(body, vip, module) changed = True end_state = get_hsrp_group(group, interface, module) if 'configure' in commands: commands.pop(0) results = {} results['proposed'] = proposed results['existing'] = existing results['end_state'] = end_state results['updates'] = commands results['changed'] = changed module.exit_json(**results) if __name__ == '__main__': main()
MartinPyka/Parametric-Anatomical-Modeling
refs/heads/master
pam/trees/diameter.py
1
from . import mstree def add_quad_diameter(root_node, scale = 0.5, offset = 0.5, path_scale = 1.0): # For realistic dendrite thickness special quadratic coefficients are needed quad_coefficients = \ {8: (0.034881, -0.6837, 3.6564), 16: (0.013947, -0.51179, 4.9629), 24: (0.0064104, -0.39213, 6.0818), 32: (0.0040126, -0.33498, 7.0306), 40: (0.0028541, -0.2992, 7.8229), 48: (0.002163, -0.27289, 8.5377), 56: (0.0017122, -0.25251, 9.1937), 64: (0.0013991, -0.23611, 9.8033), 72: (0.0011712, -0.22255, 10.375), 80: (0.0009992, -0.21109, 10.915), 88: (0.00086562, -0.20124, 11.428), 96: (0.00075942, -0.19265, 11.917), 104: (0.00067336, -0.18507, 12.386), 112: (0.00060242, -0.17833, 12.837), 120: (0.00054315, -0.17227, 13.271), 128: (0.00049301, -0.16678, 13.691), 136: (0.00045017, -0.16179, 14.097), 144: (0.00041319, -0.15722, 14.49), 152: (0.00038102, -0.15301, 14.873), 160: (0.00035283, -0.14913, 15.245), 168: (0.00032796, -0.14552, 15.608), 176: (0.00030588, -0.14216, 15.961), 184: (0.00028618, -0.13902, 16.306), 192: (0.0002685, -0.13608, 16.644), 200: (0.00025257, -0.13332, 16.974), 208: (0.00023817, -0.13071, 17.297), 216: (0.00022508, -0.12825, 17.613), 224: (0.00021315, -0.12593, 17.924), 232: (0.00020224, -0.12372, 18.228), 240: (0.00019222, -0.12162, 18.527), 248: (0.00018301, -0.11963, 18.82), 256: (0.00017452, -0.11773, 19.109), 264: (0.00016666, -0.11591, 19.392), 272: (0.00015937, -0.11418, 19.671), 280: (0.0001526, -0.11251, 19.946), 288: (0.00014629, -0.11092, 20.216), 296: (0.00014041, -0.10939, 20.482), 304: (0.00013491, -0.10793, 20.744), 312: (0.00012976, -0.10651, 21.002), 320: (0.00012493, -0.10515, 21.257), 360: (0.00010472, -0.099041, 22.48), 400: (8.9357e-05, -0.093832, 23.639), 440: (7.735e-05, -0.089314, 24.745), 480: (6.7797e-05, -0.085364, 25.793), 520: (6.0049e-05, -0.081869, 26.789), 560: (5.3664e-05, -0.078748, 27.739), 600: (4.833e-05, -0.075937, 28.647), 640: (4.382e-05, -0.073387, 29.517), 680: (3.9968e-05, -0.071061, 30.351), 720: 
(3.6647e-05, -0.068927, 31.152), 760: (3.3762e-05, -0.066959, 31.922), 800: (3.1236e-05, -0.065137, 32.664), 840: (2.9011e-05, -0.063444, 33.378), 880: (2.704e-05, -0.061865, 34.067), 920: (2.5283e-05, -0.060388, 34.732), 960: (2.3712e-05, -0.059003, 35.373)} # Collect all nodes in a list nodes = mstree.tree_to_list(root_node) # Determine terminal nodes terminal_nodes = [node for node in nodes if not node.children] for terminal_node in terminal_nodes: node = terminal_node c = quad_coefficients[min(quad_coefficients, key=lambda x:abs(x - terminal_node.path_distance * path_scale))] while node is not None: x = node.path_distance * path_scale if not hasattr(node, 'temp_t'): node.temp_t = [] node.temp_t.append((x**2 * c[0] + x * c[1] + c[2]) * scale) node = node.parent for node in nodes: node.thickness = sum(node.temp_t) / len(node.temp_t) + offset del node.temp_t
mobarski/sandbox
refs/heads/master
topic/test_kcv.py
2
from contrib import * if 1: db = KCV('data/text.kcv') if 0: text = KV('data/text.db',5) for k,v in text.items(): db.set('text',k,v.decode('utf8')) db.sync() db.to_col_store('data/text_col.kcv',batch=10) if 1: db = KCV('data/tokens.kcv') if 1: text = KV('data/tokens.db',5) for k,v in text.items(): db.set('tokens',k.decode('utf8'),v) db.sync() db.to_col_store('data/tokens_col.kcv',batch=10)
MalloyPower/parsing-python
refs/heads/master
front-end/testsuite-python-lib/Python-2.4.3/Lib/test/test_traceback.py
15
"""Test cases for traceback module""" import unittest from test.test_support import run_unittest, is_jython import traceback class TracebackCases(unittest.TestCase): # For now, a very minimal set of tests. I want to be sure that # formatting of SyntaxErrors works based on changes for 2.1. def get_exception_format(self, func, exc): try: func() except exc, value: return traceback.format_exception_only(exc, value) else: raise ValueError, "call did not raise exception" def syntax_error_with_caret(self): compile("def fact(x):\n\treturn x!\n", "?", "exec") def syntax_error_without_caret(self): # XXX why doesn't compile raise the same traceback? import test.badsyntax_nocaret def test_caret(self): err = self.get_exception_format(self.syntax_error_with_caret, SyntaxError) self.assert_(len(err) == 4) self.assert_("^" in err[2]) # third line has caret self.assert_(err[1].strip() == "return x!") def test_nocaret(self): if is_jython: # jython adds a caret in this case (why shouldn't it?) return err = self.get_exception_format(self.syntax_error_without_caret, SyntaxError) self.assert_(len(err) == 3) self.assert_(err[1].strip() == "[x for x in x] = x") def test_bug737473(self): import sys, os, tempfile, time savedpath = sys.path[:] testdir = tempfile.mkdtemp() try: sys.path.insert(0, testdir) testfile = os.path.join(testdir, 'test_bug737473.py') print >> open(testfile, 'w'), """ def test(): raise ValueError""" if 'test_bug737473' in sys.modules: del sys.modules['test_bug737473'] import test_bug737473 try: test_bug737473.test() except ValueError: # this loads source code to linecache traceback.extract_tb(sys.exc_traceback) # If this test runs too quickly, test_bug737473.py's mtime # attribute will remain unchanged even if the file is rewritten. # Consequently, the file would not reload. So, added a sleep() # delay to assure that a new, distinct timestamp is written. 
# Since WinME with FAT32 has multisecond resolution, more than # three seconds are needed for this test to pass reliably :-( time.sleep(4) print >> open(testfile, 'w'), """ def test(): raise NotImplementedError""" reload(test_bug737473) try: test_bug737473.test() except NotImplementedError: src = traceback.extract_tb(sys.exc_traceback)[-1][-1] self.failUnlessEqual(src, 'raise NotImplementedError') finally: sys.path[:] = savedpath for f in os.listdir(testdir): os.unlink(os.path.join(testdir, f)) os.rmdir(testdir) def test_main(): run_unittest(TracebackCases) if __name__ == "__main__": test_main()
czgu/opendataexperience
refs/heads/master
env/lib/python2.7/site-packages/django/db/models/options.py
49
from __future__ import unicode_literals from bisect import bisect from collections import OrderedDict import warnings from django.apps import apps from django.conf import settings from django.db.models.fields.related import ManyToManyRel from django.db.models.fields import AutoField, FieldDoesNotExist from django.db.models.fields.proxy import OrderWrt from django.utils import six from django.utils.deprecation import RemovedInDjango18Warning from django.utils.encoding import force_text, smart_text, python_2_unicode_compatible from django.utils.functional import cached_property from django.utils.text import camel_case_to_spaces from django.utils.translation import activate, deactivate_all, get_language, string_concat DEFAULT_NAMES = ('verbose_name', 'verbose_name_plural', 'db_table', 'ordering', 'unique_together', 'permissions', 'get_latest_by', 'order_with_respect_to', 'app_label', 'db_tablespace', 'abstract', 'managed', 'proxy', 'swappable', 'auto_created', 'index_together', 'apps', 'default_permissions', 'select_on_save') def normalize_together(option_together): """ option_together can be either a tuple of tuples, or a single tuple of two strings. Normalize it to a tuple of tuples, so that calling code can uniformly expect that. """ try: if not option_together: return () if not isinstance(option_together, (tuple, list)): raise TypeError first_element = next(iter(option_together)) if not isinstance(first_element, (tuple, list)): option_together = (option_together,) # Normalize everything to tuples return tuple(tuple(ot) for ot in option_together) except TypeError: # If the value of option_together isn't valid, return it # verbatim; this will be picked up by the check framework later. 
return option_together @python_2_unicode_compatible class Options(object): def __init__(self, meta, app_label=None): self.local_fields = [] self.local_many_to_many = [] self.virtual_fields = [] self.model_name = None self.verbose_name = None self.verbose_name_plural = None self.db_table = '' self.ordering = [] self.unique_together = [] self.index_together = [] self.select_on_save = False self.default_permissions = ('add', 'change', 'delete') self.permissions = [] self.object_name = None self.app_label = app_label self.get_latest_by = None self.order_with_respect_to = None self.db_tablespace = settings.DEFAULT_TABLESPACE self.meta = meta self.pk = None self.has_auto_field = False self.auto_field = None self.abstract = False self.managed = True self.proxy = False # For any class that is a proxy (including automatically created # classes for deferred object loading), proxy_for_model tells us # which class this model is proxying. Note that proxy_for_model # can create a chain of proxy models. For non-proxy models, the # variable is always None. self.proxy_for_model = None # For any non-abstract class, the concrete class is the model # in the end of the proxy_for_model chain. In particular, for # concrete models, the concrete_model is always the class itself. self.concrete_model = None self.swappable = None self.parents = OrderedDict() self.auto_created = False # To handle various inheritance situations, we need to track where # managers came from (concrete or abstract base classes). self.abstract_managers = [] self.concrete_managers = [] # List of all lookups defined in ForeignKey 'limit_choices_to' options # from *other* models. Needed for some admin checks. Internal use only. self.related_fkey_lookups = [] # A custom app registry to use, if you're making a separate model set. self.apps = apps @property def app_config(self): # Don't go through get_app_config to avoid triggering imports. 
return self.apps.app_configs.get(self.app_label) @property def installed(self): return self.app_config is not None def contribute_to_class(self, cls, name): from django.db import connection from django.db.backends.utils import truncate_name cls._meta = self self.model = cls # First, construct the default values for these options. self.object_name = cls.__name__ self.model_name = self.object_name.lower() self.verbose_name = camel_case_to_spaces(self.object_name) # Store the original user-defined values for each option, # for use when serializing the model definition self.original_attrs = {} # Next, apply any overridden values from 'class Meta'. if self.meta: meta_attrs = self.meta.__dict__.copy() for name in self.meta.__dict__: # Ignore any private attributes that Django doesn't care about. # NOTE: We can't modify a dictionary's contents while looping # over it, so we loop over the *original* dictionary instead. if name.startswith('_'): del meta_attrs[name] for attr_name in DEFAULT_NAMES: if attr_name in meta_attrs: setattr(self, attr_name, meta_attrs.pop(attr_name)) self.original_attrs[attr_name] = getattr(self, attr_name) elif hasattr(self.meta, attr_name): setattr(self, attr_name, getattr(self.meta, attr_name)) self.original_attrs[attr_name] = getattr(self, attr_name) ut = meta_attrs.pop('unique_together', self.unique_together) self.unique_together = normalize_together(ut) it = meta_attrs.pop('index_together', self.index_together) self.index_together = normalize_together(it) # verbose_name_plural is a special case because it uses a 's' # by default. if self.verbose_name_plural is None: self.verbose_name_plural = string_concat(self.verbose_name, 's') # Any leftover attributes must be invalid. if meta_attrs != {}: raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys())) else: self.verbose_name_plural = string_concat(self.verbose_name, 's') del self.meta # If the db_table wasn't provided, use the app_label + model_name. 
if not self.db_table: self.db_table = "%s_%s" % (self.app_label, self.model_name) self.db_table = truncate_name(self.db_table, connection.ops.max_name_length()) @property def module_name(self): """ This property has been deprecated in favor of `model_name`. refs #19689 """ warnings.warn( "Options.module_name has been deprecated in favor of model_name", RemovedInDjango18Warning, stacklevel=2) return self.model_name def _prepare(self, model): if self.order_with_respect_to: self.order_with_respect_to = self.get_field(self.order_with_respect_to) self.ordering = ('_order',) if not any(isinstance(field, OrderWrt) for field in model._meta.local_fields): model.add_to_class('_order', OrderWrt()) else: self.order_with_respect_to = None if self.pk is None: if self.parents: # Promote the first parent link in lieu of adding yet another # field. field = next(six.itervalues(self.parents)) # Look for a local field with the same name as the # first parent link. If a local field has already been # created, use it instead of promoting the parent already_created = [fld for fld in self.local_fields if fld.name == field.name] if already_created: field = already_created[0] field.primary_key = True self.setup_pk(field) else: auto = AutoField(verbose_name='ID', primary_key=True, auto_created=True) model.add_to_class('id', auto) def add_field(self, field): # Insert the given field in the order in which it was created, using # the "creation_counter" attribute of the field. # Move many-to-many related fields from self.fields into # self.many_to_many. 
if field.rel and isinstance(field.rel, ManyToManyRel): self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field) if hasattr(self, '_m2m_cache'): del self._m2m_cache else: self.local_fields.insert(bisect(self.local_fields, field), field) self.setup_pk(field) if hasattr(self, '_field_cache'): del self._field_cache del self._field_name_cache # The fields, concrete_fields and local_concrete_fields are # implemented as cached properties for performance reasons. # The attrs will not exists if the cached property isn't # accessed yet, hence the try-excepts. try: del self.fields except AttributeError: pass try: del self.concrete_fields except AttributeError: pass try: del self.local_concrete_fields except AttributeError: pass if hasattr(self, '_name_map'): del self._name_map def add_virtual_field(self, field): self.virtual_fields.append(field) def setup_pk(self, field): if not self.pk and field.primary_key: self.pk = field field.serialize = False def pk_index(self): """ Returns the index of the primary key field in the self.concrete_fields list. """ return self.concrete_fields.index(self.pk) def setup_proxy(self, target): """ Does the internal setup so that the current model is a proxy for "target". """ self.pk = target._meta.pk self.proxy_for_model = target self.db_table = target._meta.db_table def __repr__(self): return '<Options for %s>' % self.object_name def __str__(self): return "%s.%s" % (smart_text(self.app_label), smart_text(self.model_name)) def verbose_name_raw(self): """ There are a few places where the untranslated verbose name is needed (so that we get the same value regardless of currently active locale). """ lang = get_language() deactivate_all() raw = force_text(self.verbose_name) activate(lang) return raw verbose_name_raw = property(verbose_name_raw) def _swapped(self): """ Has this model been swapped out for another? If so, return the model name of the replacement; otherwise, return None. 
For historical reasons, model name lookups using get_model() are case insensitive, so we make sure we are case insensitive here. """ if self.swappable: model_label = '%s.%s' % (self.app_label, self.model_name) swapped_for = getattr(settings, self.swappable, None) if swapped_for: try: swapped_label, swapped_object = swapped_for.split('.') except ValueError: # setting not in the format app_label.model_name # raising ImproperlyConfigured here causes problems with # test cleanup code - instead it is raised in get_user_model # or as part of validation. return swapped_for if '%s.%s' % (swapped_label, swapped_object.lower()) not in (None, model_label): return swapped_for return None swapped = property(_swapped) @cached_property def fields(self): """ The getter for self.fields. This returns the list of field objects available to this model (including through parent models). Callers are not permitted to modify this list, since it's a reference to this instance (not a copy). """ try: self._field_name_cache except AttributeError: self._fill_fields_cache() return self._field_name_cache @cached_property def concrete_fields(self): return [f for f in self.fields if f.column is not None] @cached_property def local_concrete_fields(self): return [f for f in self.local_fields if f.column is not None] def get_fields_with_model(self): """ Returns a sequence of (field, model) pairs for all fields. The "model" element is None for fields on the current model. Mostly of use when constructing queries so that we know which model a field belongs to. 
""" try: self._field_cache except AttributeError: self._fill_fields_cache() return self._field_cache def get_concrete_fields_with_model(self): return [(field, model) for field, model in self.get_fields_with_model() if field.column is not None] def _fill_fields_cache(self): cache = [] for parent in self.parents: for field, model in parent._meta.get_fields_with_model(): if model: cache.append((field, model)) else: cache.append((field, parent)) cache.extend((f, None) for f in self.local_fields) self._field_cache = tuple(cache) self._field_name_cache = [x for x, _ in cache] def _many_to_many(self): try: self._m2m_cache except AttributeError: self._fill_m2m_cache() return list(self._m2m_cache) many_to_many = property(_many_to_many) def get_m2m_with_model(self): """ The many-to-many version of get_fields_with_model(). """ try: self._m2m_cache except AttributeError: self._fill_m2m_cache() return list(six.iteritems(self._m2m_cache)) def _fill_m2m_cache(self): cache = OrderedDict() for parent in self.parents: for field, model in parent._meta.get_m2m_with_model(): if model: cache[field] = model else: cache[field] = parent for field in self.local_many_to_many: cache[field] = None self._m2m_cache = cache def get_field(self, name, many_to_many=True): """ Returns the requested field by name. Raises FieldDoesNotExist on error. """ to_search = (self.fields + self.many_to_many) if many_to_many else self.fields for f in to_search: if f.name == name: return f raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, name)) def get_field_by_name(self, name): """ Returns the (field_object, model, direct, m2m), where field_object is the Field instance for the given name, model is the model containing this field (None for local fields), direct is True if the field exists on this model, and m2m is True for many-to-many relations. 
When 'direct' is False, 'field_object' is the corresponding RelatedObject for this field (since the field doesn't have an instance associated with it). Uses a cache internally, so after the first access, this is very fast. """ try: try: return self._name_map[name] except AttributeError: cache = self.init_name_map() return cache[name] except KeyError: raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, name)) def get_all_field_names(self): """ Returns a list of all field names that are possible for this model (including reverse relation names). This is used for pretty printing debugging output (a list of choices), so any internal-only field names are not included. """ try: cache = self._name_map except AttributeError: cache = self.init_name_map() names = sorted(cache.keys()) # Internal-only names end with "+" (symmetrical m2m related names being # the main example). Trim them. return [val for val in names if not val.endswith('+')] def init_name_map(self): """ Initialises the field name -> field object mapping. """ cache = {} # We intentionally handle related m2m objects first so that symmetrical # m2m accessor names can be overridden, if necessary. for f, model in self.get_all_related_m2m_objects_with_model(): cache[f.field.related_query_name()] = (f, model, False, True) for f, model in self.get_all_related_objects_with_model(): cache[f.field.related_query_name()] = (f, model, False, False) for f, model in self.get_m2m_with_model(): cache[f.name] = cache[f.attname] = (f, model, True, True) for f, model in self.get_fields_with_model(): cache[f.name] = cache[f.attname] = (f, model, True, False) for f in self.virtual_fields: if hasattr(f, 'related'): cache[f.name] = cache[f.attname] = ( f, None if f.model == self.model else f.model, True, False) if apps.ready: self._name_map = cache return cache def get_add_permission(self): """ This method has been deprecated in favor of `django.contrib.auth.get_permission_codename`. 
refs #20642 """ warnings.warn( "`Options.get_add_permission` has been deprecated in favor " "of `django.contrib.auth.get_permission_codename`.", RemovedInDjango18Warning, stacklevel=2) return 'add_%s' % self.model_name def get_change_permission(self): """ This method has been deprecated in favor of `django.contrib.auth.get_permission_codename`. refs #20642 """ warnings.warn( "`Options.get_change_permission` has been deprecated in favor " "of `django.contrib.auth.get_permission_codename`.", RemovedInDjango18Warning, stacklevel=2) return 'change_%s' % self.model_name def get_delete_permission(self): """ This method has been deprecated in favor of `django.contrib.auth.get_permission_codename`. refs #20642 """ warnings.warn( "`Options.get_delete_permission` has been deprecated in favor " "of `django.contrib.auth.get_permission_codename`.", RemovedInDjango18Warning, stacklevel=2) return 'delete_%s' % self.model_name def get_all_related_objects(self, local_only=False, include_hidden=False, include_proxy_eq=False): return [k for k, v in self.get_all_related_objects_with_model( local_only=local_only, include_hidden=include_hidden, include_proxy_eq=include_proxy_eq)] def get_all_related_objects_with_model(self, local_only=False, include_hidden=False, include_proxy_eq=False): """ Returns a list of (related-object, model) pairs. Similar to get_fields_with_model(). 
""" try: self._related_objects_cache except AttributeError: self._fill_related_objects_cache() predicates = [] if local_only: predicates.append(lambda k, v: not v) if not include_hidden: predicates.append(lambda k, v: not k.field.rel.is_hidden()) cache = (self._related_objects_proxy_cache if include_proxy_eq else self._related_objects_cache) return [t for t in cache.items() if all(p(*t) for p in predicates)] def _fill_related_objects_cache(self): cache = OrderedDict() parent_list = self.get_parent_list() for parent in self.parents: for obj, model in parent._meta.get_all_related_objects_with_model(include_hidden=True): if (obj.field.creation_counter < 0 or obj.field.rel.parent_link) and obj.model not in parent_list: continue if not model: cache[obj] = parent else: cache[obj] = model # Collect also objects which are in relation to some proxy child/parent of self. proxy_cache = cache.copy() for klass in self.apps.get_models(include_auto_created=True): if not klass._meta.swapped: for f in klass._meta.local_fields + klass._meta.virtual_fields: if (hasattr(f, 'rel') and f.rel and not isinstance(f.rel.to, six.string_types) and f.generate_reverse_relation): if self == f.rel.to._meta: cache[f.related] = None proxy_cache[f.related] = None elif self.concrete_model == f.rel.to._meta.concrete_model: proxy_cache[f.related] = None self._related_objects_cache = cache self._related_objects_proxy_cache = proxy_cache def get_all_related_many_to_many_objects(self, local_only=False): try: cache = self._related_many_to_many_cache except AttributeError: cache = self._fill_related_many_to_many_cache() if local_only: return [k for k, v in cache.items() if not v] return list(cache) def get_all_related_m2m_objects_with_model(self): """ Returns a list of (related-m2m-object, model) pairs. Similar to get_fields_with_model(). 
""" try: cache = self._related_many_to_many_cache except AttributeError: cache = self._fill_related_many_to_many_cache() return list(six.iteritems(cache)) def _fill_related_many_to_many_cache(self): cache = OrderedDict() parent_list = self.get_parent_list() for parent in self.parents: for obj, model in parent._meta.get_all_related_m2m_objects_with_model(): if obj.field.creation_counter < 0 and obj.model not in parent_list: continue if not model: cache[obj] = parent else: cache[obj] = model for klass in self.apps.get_models(): if not klass._meta.swapped: for f in klass._meta.local_many_to_many: if (f.rel and not isinstance(f.rel.to, six.string_types) and self == f.rel.to._meta): cache[f.related] = None if apps.ready: self._related_many_to_many_cache = cache return cache def get_base_chain(self, model): """ Returns a list of parent classes leading to 'model' (order from closet to most distant ancestor). This has to handle the case were 'model' is a grandparent or even more distant relation. """ if not self.parents: return None if model in self.parents: return [model] for parent in self.parents: res = parent._meta.get_base_chain(model) if res: res.insert(0, parent) return res return None def get_parent_list(self): """ Returns a list of all the ancestor of this model as a list. Useful for determining if something is an ancestor, regardless of lineage. """ result = set() for parent in self.parents: result.add(parent) result.update(parent._meta.get_parent_list()) return result def get_ancestor_link(self, ancestor): """ Returns the field on the current model which points to the given "ancestor". This is possible an indirect link (a pointer to a parent model, which points, eventually, to the ancestor). Used when constructing table joins for model inheritance. Returns None if the model isn't an ancestor of this one. 
""" if ancestor in self.parents: return self.parents[ancestor] for parent in self.parents: # Tries to get a link field from the immediate parent parent_link = parent._meta.get_ancestor_link(ancestor) if parent_link: # In case of a proxied model, the first link # of the chain to the ancestor is that parent # links return self.parents[parent] or parent_link
qedsoftware/commcare-hq
refs/heads/master
corehq/apps/sms/api.py
1
import logging
import random
import string
from django.conf import settings
from django.core.exceptions import ValidationError
from corehq.apps.smsbillables.utils import log_smsbillables_error
from corehq.apps.users.models import CommCareUser, WebUser
from dimagi.utils.modules import to_function
from dimagi.utils.logging import notify_exception
from corehq import privileges
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.sms.util import (clean_phone_number, clean_text,
    get_backend_classes)
from corehq.apps.sms.models import (OUTGOING, INCOMING,
    PhoneBlacklist, SMS, SelfRegistrationInvitation,
    MessagingEvent, SQLMobileBackend, SQLSMSBackend, QueuedSMS,
    PhoneNumber)
from corehq.apps.sms.messages import (get_message, MSG_OPTED_IN,
    MSG_OPTED_OUT, MSG_DUPLICATE_USERNAME, MSG_USERNAME_TOO_LONG,
    MSG_REGISTRATION_WELCOME_CASE, MSG_REGISTRATION_WELCOME_MOBILE_WORKER)
from corehq.apps.sms.mixin import BadSMSConfigException
from corehq.apps.sms.util import is_contact_active
from corehq.apps.domain.models import Domain
from datetime import datetime
from corehq.apps.sms.util import register_sms_contact, strip_plus

# A list of all keywords which allow registration via sms.
# Meant to allow support for multiple languages.
# Keywords should be in all caps.
REGISTRATION_KEYWORDS = ["JOIN"]
REGISTRATION_MOBILE_WORKER_KEYWORDS = ["WORKER"]


class BackendAuthorizationException(Exception):
    # Raised when a domain tries to send through a backend it may not use.
    pass


def get_utcnow():
    """
    Used to make it easier to mock utcnow() in the tests.
    """
    return datetime.utcnow()


class MessageMetadata(object):
    # Optional tags that get copied onto an SMS record by add_msg_tags().
    # All attributes default to None, which means "leave the message field
    # untouched".
    def __init__(self, *args, **kwargs):
        self.workflow = kwargs.get("workflow", None)
        self.xforms_session_couch_id = kwargs.get("xforms_session_couch_id", None)
        self.reminder_id = kwargs.get("reminder_id", None)
        self.chat_user_id = kwargs.get("chat_user_id", None)
        self.ignore_opt_out = kwargs.get("ignore_opt_out", None)
        self.location_id = kwargs.get('location_id', None)
        self.messaging_subevent_id = kwargs.get('messaging_subevent_id', None)


def add_msg_tags(msg, metadata):
    """Copy every non-None MessageMetadata field onto msg and save it."""
    if msg and metadata:
        fields = ('workflow', 'xforms_session_couch_id', 'reminder_id',
            'chat_user_id', 'ignore_opt_out', 'location_id',
            'messaging_subevent_id')
        for field in fields:
            value = getattr(metadata, field)
            if value is not None:
                setattr(msg, field, value)
        msg.save()


def log_sms_exception(msg):
    """Report the current exception (with message context) to the error log."""
    direction = "OUT" if msg.direction == OUTGOING else "IN"
    message = "[SMS %s] Error processing SMS" % direction
    notify_exception(None, message=message, details={
        'domain': msg.domain,
        'date': msg.date,
        'message_id': msg.couch_id,
    })


def get_location_id_by_contact(domain, contact):
    # WebUsers can have a different location per domain; CommCareUsers have
    # a single one. Any other contact type has no location.
    if isinstance(contact, CommCareUser):
        return contact.location_id
    elif isinstance(contact, WebUser):
        return contact.get_location_id(domain)
    else:
        return None


def get_location_id_by_verified_number(v):
    return get_location_id_by_contact(v.domain, v.owner)


def get_sms_class():
    # When the SMS queue is on, messages are persisted as QueuedSMS first.
    return QueuedSMS if settings.SMS_QUEUE_ENABLED else SMS


def send_sms(domain, contact, phone_number, text, metadata=None):
    """
    Sends an outbound SMS. Returns false if it fails.
    """
    if phone_number is None:
        return False
    if isinstance(phone_number, int) or isinstance(phone_number, long):
        phone_number = str(phone_number)
    phone_number = clean_phone_number(phone_number)
    msg = get_sms_class()(
        domain=domain,
        phone_number=phone_number,
        direction=OUTGOING,
        date=get_utcnow(),
        backend_id=None,
        location_id=get_location_id_by_contact(domain, contact),
        text = text
    )
    if contact:
        msg.couch_recipient = contact.get_id
        msg.couch_recipient_doc_type = contact.doc_type
    add_msg_tags(msg, metadata)
    return queue_outgoing_sms(msg)


def send_sms_to_verified_number(verified_number, text, metadata=None,
        logged_subevent=None):
    """
    Sends an sms using the given verified phone number entry.

    verified_number The PhoneNumber entry to use when sending.
    text            The text of the message to send.

    return  True on success, False on failure
    """
    try:
        backend = verified_number.backend
    except BadSMSConfigException as e:
        # With a subevent we can record the error and fail soft; otherwise
        # the caller has to deal with the misconfiguration.
        if logged_subevent:
            logged_subevent.error(MessagingEvent.ERROR_GATEWAY_NOT_FOUND,
                additional_error_text=e.message)
            return False
        raise

    msg = get_sms_class()(
        couch_recipient_doc_type = verified_number.owner_doc_type,
        couch_recipient = verified_number.owner_id,
        phone_number = "+" + str(verified_number.phone_number),
        direction = OUTGOING,
        date=get_utcnow(),
        domain = verified_number.domain,
        backend_id=backend.couch_id,
        location_id=get_location_id_by_verified_number(verified_number),
        text = text
    )
    add_msg_tags(msg, metadata)
    return queue_outgoing_sms(msg)


def send_sms_with_backend(domain, phone_number, text, backend_id, metadata=None):
    """Send an SMS forcing a specific backend id (bypasses routing)."""
    phone_number = clean_phone_number(phone_number)
    msg = get_sms_class()(
        domain=domain,
        phone_number=phone_number,
        direction=OUTGOING,
        date=get_utcnow(),
        backend_id=backend_id,
        text=text
    )
    add_msg_tags(msg, metadata)
    return queue_outgoing_sms(msg)


def send_sms_with_backend_name(domain, phone_number, text, backend_name, metadata=None):
    """Send an SMS through the backend registered under backend_name."""
    phone_number = clean_phone_number(phone_number)
    backend = SQLMobileBackend.load_by_name(SQLMobileBackend.SMS,
        domain, backend_name)
    msg = get_sms_class()(
        domain=domain,
        phone_number=phone_number,
        direction=OUTGOING,
        date=get_utcnow(),
        backend_id=backend.couch_id,
        text=text
    )
    add_msg_tags(msg, metadata)
    return queue_outgoing_sms(msg)


def enqueue_directly(msg):
    # Best-effort fast path into the SMS queue; the periodic queue sweep
    # will pick the message up anyway if this fails.
    try:
        from corehq.apps.sms.management.commands.run_sms_queue import SMSEnqueuingOperation
        SMSEnqueuingOperation().enqueue_directly(msg)
    except:
        # If this direct enqueue fails, no problem, it will get picked up
        # shortly.
        pass


def queue_outgoing_sms(msg):
    """Either queue msg for async sending or send it synchronously,
    depending on settings.SMS_QUEUE_ENABLED. Returns True/False."""
    if settings.SMS_QUEUE_ENABLED:
        try:
            msg.processed = False
            msg.datetime_to_process = msg.date
            msg.queued_timestamp = get_utcnow()
            msg.save()
        except:
            log_sms_exception(msg)
            return False
        enqueue_directly(msg)
        return True
    else:
        msg.processed = True
        msg_sent = send_message_via_backend(msg)
        msg.publish_change()
        if msg_sent:
            # When the queue is enabled the billable is created on dequeue
            # instead (see process_incoming's closing comment).
            create_billable_for_sms(msg)
        return msg_sent


def send_message_via_backend(msg, backend=None, orig_phone_number=None):
    """send sms using a specific backend

    msg - outbound message object
    backend - backend to use for sending; if None, msg.outbound_backend is used
    orig_phone_number - the originating phone number to use when sending; this
      is sent in if the backend supports load balancing
    """
    try:
        msg.text = clean_text(msg.text)
    except Exception:
        # Sending proceeds with the original text if cleaning fails.
        logging.exception("Could not clean text for sms dated '%s' in domain '%s'" % (msg.date, msg.domain))
    try:
        if not domain_has_privilege(msg.domain, privileges.OUTBOUND_SMS):
            raise Exception(
                ("Domain '%s' does not have permission to send SMS."
                 " Please investigate why this function was called.") % msg.domain
            )

        phone_obj = PhoneBlacklist.get_by_phone_number_or_none(msg.phone_number)
        if phone_obj and not phone_obj.send_sms:
            if msg.ignore_opt_out and phone_obj.can_opt_in:
                # If ignore_opt_out is True on the message, then we'll still
                # send it. However, if we're not letting the phone number
                # opt back in and it's in an opted-out state, we will not
                # send anything to it no matter the state of the ignore_opt_out
                # flag.
                pass
            else:
                msg.set_system_error(SMS.ERROR_PHONE_NUMBER_OPTED_OUT)
                return False

        if not backend:
            backend = msg.outbound_backend

        if backend.domain_is_authorized(msg.domain):
            backend.send(msg, orig_phone_number=orig_phone_number)
        else:
            raise BackendAuthorizationException(
                "Domain '%s' is not authorized to use backend '%s'" % (msg.domain, backend.pk)
            )

        # Record which backend actually handled the send.
        msg.backend_api = backend.hq_api_id
        msg.backend_id = backend.couch_id
        msg.save()
        return True
    except Exception:
        log_sms_exception(msg)
        return False


def random_password():
    """
    This method creates a random password for an sms user registered via sms
    """
    # NOTE(review): uses the `random` module, not `secrets`; presumably
    # acceptable for a temporary auto-generated password -- confirm.
    chars = string.ascii_uppercase + string.ascii_lowercase + string.digits
    return ''.join(random.choice(chars) for x in range(15))


def process_username(username, domain):
    """Validate (and possibly reject) a requested mobile-worker username."""
    from corehq.apps.users.forms import (clean_mobile_worker_username,
        get_mobile_worker_max_username_length)

    max_length = get_mobile_worker_max_username_length(domain)

    return clean_mobile_worker_username(
        domain,
        username,
        name_too_long_message=get_message(MSG_USERNAME_TOO_LONG, context=(username, max_length)),
        name_exists_message=get_message(MSG_DUPLICATE_USERNAME, context=(username,))
    )


def is_registration_text(text):
    # True when the first word of the message is a registration keyword.
    keywords = text.strip().upper().split()
    if len(keywords) == 0:
        return False
    return keywords[0] in REGISTRATION_KEYWORDS


def process_pre_registration(msg):
    """
    Returns True if this message was part of the SMS pre-registration
    workflow (see corehq.apps.sms.models.SelfRegistrationInvitation).
    Returns False if it's not part of the pre-registration workflow or
    if the workflow has already been completed.
    """
    invitation = SelfRegistrationInvitation.by_phone(msg.phone_number)
    if not invitation:
        return False

    domain = Domain.get_by_name(invitation.domain, strict=True)
    if not domain.sms_mobile_worker_registration_enabled:
        return False

    text = msg.text.strip()

    if is_registration_text(text):
        # Return False to let the message be processed through the SMS
        # registration workflow
        return False
    elif invitation.phone_type:
        # If the user has already indicated what kind of phone they have,
        # but is still replying with sms, then just resend them the
        # appropriate registration instructions
        if invitation.phone_type == SelfRegistrationInvitation.PHONE_TYPE_ANDROID:
            invitation.send_step2_android_sms()
        elif invitation.phone_type == SelfRegistrationInvitation.PHONE_TYPE_OTHER:
            invitation.send_step2_java_sms()
        return True
    elif text == '1':
        # '1' / '2' are the menu answers from the step-1 prompt.
        invitation.phone_type = SelfRegistrationInvitation.PHONE_TYPE_ANDROID
        invitation.save()
        invitation.send_step2_android_sms()
        return True
    elif text == '2':
        invitation.phone_type = SelfRegistrationInvitation.PHONE_TYPE_OTHER
        invitation.save()
        invitation.send_step2_java_sms()
        return True
    else:
        # Unrecognized reply: re-send the step-1 prompt.
        invitation.send_step1_sms()
        return True


def process_sms_registration(msg):
    """
    This method handles registration via sms.
    Returns True if a contact was registered, False if not.

    To have a case register itself, do the following:

        1) Select "Enable Case Registration Via SMS" in project settings, and
           fill in the associated Case Registration settings.

        2) Text in "join <domain>", where <domain> is the domain to join.
           If the sending number does not exist in the system, a case will be
           registered tied to that number.
           The "join" keyword can be any keyword in REGISTRATION_KEYWORDS.
           This is meant to support multiple translations.

    To have a mobile worker register itself, do the following:

        1) Select "Enable Mobile Worker Registration via SMS" in project
           settings.

        2) Text in "join <domain> worker <username>", where <domain> is the
           domain to join and <username> is the requested username. If the
           username doesn't exist it will be created, otherwise the
           registration will error. If the username argument is not
           specified, the username will be the mobile number

           The "join" and "worker" keywords can be any keyword in
           REGISTRATION_KEYWORDS and REGISTRATION_MOBILE_WORKER_KEYWORDS,
           respectively. This is meant to support multiple translations.
    """
    registration_processed = False
    text_words = msg.text.upper().split()
    # keyword1 is the registration keyword itself ("JOIN"); it is validated
    # via is_registration_text() below rather than used directly.
    keyword1 = text_words[0] if len(text_words) > 0 else ""
    keyword2 = text_words[1].lower() if len(text_words) > 1 else ""
    keyword3 = text_words[2] if len(text_words) > 2 else ""
    keyword4 = text_words[3] if len(text_words) > 3 else ""
    cleaned_phone_number = strip_plus(msg.phone_number)
    if is_registration_text(msg.text) and keyword2 != "":
        domain = Domain.get_by_name(keyword2, strict=True)
        if domain is not None:
            if domain_has_privilege(domain, privileges.INBOUND_SMS):
                if keyword3 in REGISTRATION_MOBILE_WORKER_KEYWORDS and domain.sms_mobile_worker_registration_enabled:
                    # Mobile worker self-registration path.
                    if keyword4 != '':
                        username = keyword4
                    else:
                        username = cleaned_phone_number
                    try:
                        user_data = {}

                        # Any custom user data from an outstanding invitation
                        # is carried over onto the new user.
                        invitation = SelfRegistrationInvitation.by_phone(msg.phone_number)
                        if invitation:
                            invitation.completed()
                            user_data = invitation.custom_user_data

                        username = process_username(username, domain)
                        password = random_password()
                        new_user = CommCareUser.create(domain.name, username, password, user_data=user_data)
                        new_user.add_phone_number(cleaned_phone_number)
                        new_user.save_verified_number(domain.name, cleaned_phone_number, True, None)
                        new_user.save()

                        registration_processed = True

                        if domain.enable_registration_welcome_sms_for_mobile_worker:
                            send_sms(domain.name, None, cleaned_phone_number,
                                get_message(MSG_REGISTRATION_WELCOME_MOBILE_WORKER, domain=domain.name))
                    except ValidationError as e:
                        # Bad/duplicate username: report the reason back to
                        # the sender instead of registering.
                        send_sms(domain.name, None, cleaned_phone_number, e.messages[0])

                elif domain.sms_case_registration_enabled:
                    # Case self-registration path.
                    register_sms_contact(
                        domain=domain.name,
                        case_type=domain.sms_case_registration_type,
                        case_name="unknown",
                        user_id=domain.sms_case_registration_user_id,
                        contact_phone_number=cleaned_phone_number,
                        contact_phone_number_is_verified="1",
                        owner_id=domain.sms_case_registration_owner_id,
                    )
                    registration_processed = True
                    if domain.enable_registration_welcome_sms_for_case:
                        send_sms(domain.name, None, cleaned_phone_number,
                            get_message(MSG_REGISTRATION_WELCOME_CASE, domain=domain.name))
            # Attribute the inbound message to the domain it tried to join,
            # even if registration itself did not happen.
            msg.domain = domain.name
            msg.save()

    return registration_processed


def incoming(phone_number, text, backend_api, timestamp=None,
             domain_scope=None, backend_message_id=None,
             raw_text=None, backend_id=None):
    """
    entry point for incoming sms

    phone_number - originating phone number
    text - message content
    backend_api - backend API ID of receiving sms backend
    timestamp - message received timestamp; defaults to now (UTC)
    domain_scope - set the domain scope for this SMS; see SMSBase.domain_scope for details
    """
    # Log message in message log
    if text is None:
        text = ""
    phone_number = clean_phone_number(phone_number)
    msg = get_sms_class()(
        phone_number=phone_number,
        direction=INCOMING,
        date=timestamp or get_utcnow(),
        text=text,
        domain_scope=domain_scope,
        backend_api=backend_api,
        backend_id=backend_id,
        backend_message_id=backend_message_id,
        raw_text=raw_text,
    )
    if settings.SMS_QUEUE_ENABLED:
        msg.processed = False
        msg.datetime_to_process = get_utcnow()
        msg.queued_timestamp = msg.datetime_to_process
        msg.save()
        enqueue_directly(msg)
    else:
        msg.processed = True
        msg.save()
        process_incoming(msg)
    return msg


def is_opt_message(text, keyword_list):
    # Opt keywords must match the entire (trimmed, upper-cased) message.
    if not isinstance(text, basestring):
        return False

    text = text.strip().upper()
    return text in keyword_list


def get_opt_keywords(msg):
    """Return (opt_in_keywords, opt_out_keywords) for the receiving backend."""
    backend_class = get_backend_classes().get(msg.backend_api, SQLSMSBackend)
    return (
        backend_class.get_opt_in_keywords(),
        backend_class.get_opt_out_keywords()
    )


def load_and_call(sms_handler_names, phone_number, text, sms):
    """Run each dotted-path handler in order until one reports it handled
    the message. Returns the final handled flag."""
    handled = False

    for sms_handler_name in sms_handler_names:
        try:
            handler = to_function(sms_handler_name)
        except:
            notify_exception(None, message=('error loading sms handler: %s' % sms_handler_name))
            continue

        try:
            handled = handler(phone_number, text, sms)
        except Exception:
            # A crashing handler is logged and treated as "not handled".
            log_sms_exception(sms)

        if handled:
            break

    return handled


def process_incoming(msg):
    """Main dispatch for an inbound SMS: attribute it to a contact/domain,
    apply opt-in/opt-out keywords, then hand it to the handler chain or the
    registration/verification workflows."""
    v = PhoneNumber.by_phone(msg.phone_number, include_pending=True)

    if v:
        msg.couch_recipient_doc_type = v.owner_doc_type
        msg.couch_recipient = v.owner_id
        msg.domain = v.domain
        msg.location_id = get_location_id_by_verified_number(v)
        msg.save()
    elif msg.domain_scope:
        msg.domain = msg.domain_scope
        msg.save()

    can_receive_sms = PhoneBlacklist.can_receive_sms(msg.phone_number)
    opt_in_keywords, opt_out_keywords = get_opt_keywords(msg)
    domain = v.domain if v else None

    if is_opt_message(msg.text, opt_out_keywords) and can_receive_sms:
        if PhoneBlacklist.opt_out_sms(msg.phone_number, domain=domain):
            # ignore_opt_out lets the confirmation go out to the number that
            # just opted out.
            metadata = MessageMetadata(ignore_opt_out=True)
            text = get_message(MSG_OPTED_OUT, v, context=(opt_in_keywords[0],))
            if v:
                send_sms_to_verified_number(v, text, metadata=metadata)
            else:
                send_sms(msg.domain, None, msg.phone_number, text, metadata=metadata)
    elif is_opt_message(msg.text, opt_in_keywords) and not can_receive_sms:
        if PhoneBlacklist.opt_in_sms(msg.phone_number, domain=domain):
            text = get_message(MSG_OPTED_IN, v, context=(opt_out_keywords[0],))
            if v:
                send_sms_to_verified_number(v, text)
            else:
                send_sms(msg.domain, None, msg.phone_number, text)
    else:
        handled = False
        is_verified = v is not None and v.verified

        if msg.domain and domain_has_privilege(msg.domain, privileges.INBOUND_SMS):
            handled = load_and_call(settings.CUSTOM_SMS_HANDLERS, v, msg.text, msg)

        if not handled and is_verified and is_contact_active(v.domain, v.owner_doc_type, v.owner_id):
            handled = load_and_call(settings.SMS_HANDLERS, v, msg.text, msg)

        if not handled and not is_verified:
            handled = process_pre_registration(msg)

            if not handled:
                handled = process_sms_registration(msg)

            if not handled:
                import verify
                verify.process_verification(v, msg)

    # If the sms queue is enabled, then the billable gets created in remove_from_queue()
    if (
        not settings.SMS_QUEUE_ENABLED and
        msg.domain and
        domain_has_privilege(msg.domain, privileges.INBOUND_SMS)
    ):
        create_billable_for_sms(msg)


def create_billable_for_sms(msg, delay=True):
    """Create the billing record for msg, asynchronously when delay=True."""
    if not isinstance(msg, SMS):
        raise Exception("Expected msg to be an SMS")

    if not msg.domain:
        return

    try:
        from corehq.apps.sms.tasks import store_billable
        if delay:
            store_billable.delay(msg)
        else:
            store_billable(msg)
    except Exception as e:
        # Billing failures must never block message processing.
        log_smsbillables_error("Errors Creating SMS Billable: %s" % e)
mollstam/UnrealPy
refs/heads/master
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Scrapy-1.0.1/scrapy/utils/deprecate.py
109
"""Some helpers for deprecation messages""" import warnings import inspect from scrapy.exceptions import ScrapyDeprecationWarning def attribute(obj, oldattr, newattr, version='0.12'): cname = obj.__class__.__name__ warnings.warn("%s.%s attribute is deprecated and will be no longer supported " "in Scrapy %s, use %s.%s attribute instead" % \ (cname, oldattr, version, cname, newattr), ScrapyDeprecationWarning, stacklevel=3) def create_deprecated_class(name, new_class, clsdict=None, warn_category=ScrapyDeprecationWarning, warn_once=True, old_class_path=None, new_class_path=None, subclass_warn_message="{cls} inherits from "\ "deprecated class {old}, please inherit "\ "from {new}.", instance_warn_message="{cls} is deprecated, "\ "instantiate {new} instead."): """ Return a "deprecated" class that causes its subclasses to issue a warning. Subclasses of ``new_class`` are considered subclasses of this class. It also warns when the deprecated class is instantiated, but do not when its subclasses are instantiated. It can be used to rename a base class in a library. For example, if we have class OldName(SomeClass): # ... and we want to rename it to NewName, we can do the following:: class NewName(SomeClass): # ... OldName = create_deprecated_class('OldName', NewName) Then, if user class inherits from OldName, warning is issued. Also, if some code uses ``issubclass(sub, OldName)`` or ``isinstance(sub(), OldName)`` checks they'll still return True if sub is a subclass of NewName instead of OldName. 
""" class DeprecatedClass(new_class.__class__): deprecated_class = None warned_on_subclass = False def __new__(metacls, name, bases, clsdict_): cls = super(DeprecatedClass, metacls).__new__(metacls, name, bases, clsdict_) if metacls.deprecated_class is None: metacls.deprecated_class = cls return cls def __init__(cls, name, bases, clsdict_): meta = cls.__class__ old = meta.deprecated_class if old in bases and not (warn_once and meta.warned_on_subclass): meta.warned_on_subclass = True msg = subclass_warn_message.format(cls=_clspath(cls), old=_clspath(old, old_class_path), new=_clspath(new_class, new_class_path)) if warn_once: msg += ' (warning only on first subclass, there may be others)' warnings.warn(msg, warn_category, stacklevel=2) super(DeprecatedClass, cls).__init__(name, bases, clsdict_) # see http://www.python.org/dev/peps/pep-3119/#overloading-isinstance-and-issubclass # and http://docs.python.org/2/reference/datamodel.html#customizing-instance-and-subclass-checks # for implementation details def __instancecheck__(cls, inst): return any(cls.__subclasscheck__(c) for c in {type(inst), inst.__class__}) def __subclasscheck__(cls, sub): if cls is not DeprecatedClass.deprecated_class: # we should do the magic only if second `issubclass` argument # is the deprecated class itself - subclasses of the # deprecated class should not use custom `__subclasscheck__` # method. 
return super(DeprecatedClass, cls).__subclasscheck__(sub) if not inspect.isclass(sub): raise TypeError("issubclass() arg 1 must be a class") mro = getattr(sub, '__mro__', ()) return any(c in {cls, new_class} for c in mro) def __call__(cls, *args, **kwargs): old = DeprecatedClass.deprecated_class if cls is old: msg = instance_warn_message.format(cls=_clspath(cls, old_class_path), new=_clspath(new_class, new_class_path)) warnings.warn(msg, warn_category, stacklevel=2) return super(DeprecatedClass, cls).__call__(*args, **kwargs) deprecated_cls = DeprecatedClass(name, (new_class,), clsdict or {}) try: frm = inspect.stack()[1] parent_module = inspect.getmodule(frm[0]) if parent_module is not None: deprecated_cls.__module__ = parent_module.__name__ except Exception as e: # Sometimes inspect.stack() fails (e.g. when the first import of # deprecated class is in jinja2 template). __module__ attribute is not # important enough to raise an exception as users may be unable # to fix inspect.stack() errors. 
warnings.warn("Error detecting parent module: %r" % e) return deprecated_cls def _clspath(cls, forced=None): if forced is not None: return forced return '{}.{}'.format(cls.__module__, cls.__name__) DEPRECATION_RULES = [ ('scrapy.contrib_exp.downloadermiddleware.decompression.', 'scrapy.downloadermiddlewares.decompression.'), ('scrapy.contrib_exp.iterators.', 'scrapy.utils.iterators.'), ('scrapy.contrib.downloadermiddleware.', 'scrapy.downloadermiddlewares.'), ('scrapy.contrib.exporter.', 'scrapy.exporters.'), ('scrapy.contrib.linkextractors.', 'scrapy.linkextractors.'), ('scrapy.contrib.loader.processor.', 'scrapy.loader.processors.'), ('scrapy.contrib.loader.', 'scrapy.loader.'), ('scrapy.contrib.pipeline.', 'scrapy.pipelines.'), ('scrapy.contrib.spidermiddleware.', 'scrapy.spidermiddlewares.'), ('scrapy.contrib.spiders.', 'scrapy.spiders.'), ('scrapy.contrib.', 'scrapy.extensions.'), ('scrapy.command.', 'scrapy.commands.'), ('scrapy.dupefilter.', 'scrapy.dupefilters.'), ('scrapy.linkextractor.', 'scrapy.linkextractors.'), ('scrapy.spider.', 'scrapy.spiders.'), ('scrapy.squeue.', 'scrapy.squeues.'), ('scrapy.statscol.', 'scrapy.statscollectors.'), ('scrapy.utils.decorator.', 'scrapy.utils.decorators.'), ('scrapy.spidermanager.SpiderManager', 'scrapy.spiderloader.SpiderLoader'), ] def update_classpath(path): """Update a deprecated path from an object with its new location""" for prefix, replacement in DEPRECATION_RULES: if path.startswith(prefix): new_path = path.replace(prefix, replacement, 1) warnings.warn("`{}` class is deprecated, use `{}` instead".format(path, new_path), ScrapyDeprecationWarning) return new_path return path
dmitry-sobolev/ansible
refs/heads/devel
lib/ansible/modules/windows/win_dotnet_ngen.py
78
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2015, Peter Mounce <public@neverrunwithscissors.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # this is a windows documentation stub. actual code lives in the .ps1 # file of the same name ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: win_dotnet_ngen version_added: "2.0" short_description: Runs ngen to recompile DLLs after .NET updates description: - After .NET framework is installed/updated, Windows will probably want to recompile things to optimise for the host. - This happens via scheduled task, usually at some inopportune time. - This module allows you to run this task on your own schedule, so you incur the CPU hit at some more convenient and controlled time. - "http://blogs.msdn.com/b/dotnet/archive/2013/08/06/wondering-why-mscorsvw-exe-has-high-cpu-usage-you-can-speed-it-up.aspx" notes: - there are in fact two scheduled tasks for ngen but they have no triggers so aren't a problem - there's no way to test if they've been completed (?) - the stdout is quite likely to be several megabytes author: Peter Mounce options: {} ''' EXAMPLES = r''' # Run ngen tasks win_dotnet_ngen: '''
fy2462/apollo
refs/heads/master
modules/e2e/online_system/car_act/scripts/main.py
1
#!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
This module publishes the car control message.
"""
import rospy
import time
import threading
from car_msgs.msg import ABS
from car_msgs.msg import ABM
from car_msgs.msg import BCM
from car_msgs.msg import ECM
from car_msgs.msg import SAS
from car_msgs.msg import PX2
from rc_msgs.msg import RCCommand
from novatel_msgs.msg import INSPVA
from std_msgs.msg import Bool
import sys
import signal
import numpy as np


class CarAct(object):
    """Summary of class here.

    Subscribe the CAN message and generate the car control message.

    Attributes:
        current_mode: current car control mode.
        abs_lock: threading lock for the abs CAN message.
        abm_lock: threading lock for the abm CAN message.
        ecm_lock: threading lock for the ecm CAN message.
        bcm_lock: threading lock for the bcm CAN message.
        sas_lock: threading lock for the sas CAN message.
        px2_lock: threading lock for the px2 CAN message.
        rc_lock: threading lock for the rc CAN message.
        inspva_lock: threading lock for the inspva message.
        abs_message: abs CAN message.
        abm_message: abm CAN message.
        bcm_message: bcm CAN message.
        ecm_message: ecm CAN message.
        sas_message: sas CAN message.
        rc_message: rc message.
        px2_message: the computed speed and steering_angle from the PX2.
        inspva_message: inspva message from the novatel.
        mode: car control mode.
        mode_sub: subscriber for the mode message.
        abs_sub: subscriber for the abs CAN message.
        abm_sub: subscriber for the abm CAN message.
        ecm_sub: subscriber for the ecm CAN message.
        sas_sub: subscriber for the sas CAN message.
        bcm_sub: subscriber for the bcm CAN message.
        px2_sub: subscriber for the computed speed and steering_angle
            from the PX2.
        rc_sub: subscriber for the rc message.
        inspva_sub: subscriber for the novatel inspva message.
        px2_pub: publisher for the car control message.
    """

    def __init__(self):
        """Inits the CarAct: one lock + latest-message slot per topic,
        plus the subscribers and the single control publisher."""
        self.current_mode = 0
        self.abs_lock = threading.Lock()
        self.abm_lock = threading.Lock()
        self.ecm_lock = threading.Lock()
        self.bcm_lock = threading.Lock()
        self.sas_lock = threading.Lock()
        self.px2_lock = threading.Lock()
        self.rc_lock = threading.Lock()
        self.inspva_lock = threading.Lock()
        self.abs_message = ABS()
        self.abm_message = ABM()
        self.bcm_message = BCM()
        self.ecm_message = ECM()
        self.sas_message = SAS()
        self.rc_message = RCCommand()
        self.px2_message = PX2()
        self.inspva_message = INSPVA()
        # BUG FIX: was Bool("True"), which stores the *string* "True" in a
        # boolean message field; std_msgs/Bool.data is a bool.
        self.mode = Bool(True)
        self.mode_sub = rospy.Subscriber(
            '/temp/mode', Bool, self.mode_callback, queue_size=1)
        self.abs_sub = rospy.Subscriber(
            '/car_msgs/ABS', ABS, self.abs_callback, queue_size=1)
        self.abm_sub = rospy.Subscriber(
            '/car_msgs/ABM', ABM, self.abm_callback, queue_size=1)
        self.ecm_sub = rospy.Subscriber(
            '/car_msgs/ECM', ECM, self.ecm_callback, queue_size=1)
        self.sas_sub = rospy.Subscriber(
            '/car_msgs/SAS', SAS, self.sas_callback, queue_size=1)
        self.bcm_sub = rospy.Subscriber(
            '/car_msgs/BCM', BCM, self.bcm_callback, queue_size=1)
        self.px2_sub = rospy.Subscriber(
            '/car_msgs/estimate_px2', PX2, self.px2_callback, queue_size=1)
        self.rc_sub = rospy.Subscriber(
            '/car_msgs/rc_command', RCCommand, self.rc_callback,
            queue_size=10)
        self.inspva_sub = rospy.Subscriber(
            '/novatel_data/inspva', INSPVA, self.inspva_callback,
            queue_size=1)
        self.px2_pub = rospy.Publisher('/car_msgs/PX2', PX2, queue_size=1)

    def inspva_callback(self, data):
        """inspva_callback function."""
        self.inspva_lock.acquire()
        self.inspva_message = data
        self.inspva_lock.release()

    def mode_callback(self, data):
        """mode_callback function."""
        self.mode = data

    def abs_callback(self, data):
        """abs_callback function."""
        self.abs_lock.acquire()
        self.abs_message = data
        self.abs_lock.release()

    def abm_callback(self, data):
        """abm_callback function."""
        self.abm_lock.acquire()
        self.abm_message = data
        self.abm_lock.release()

    def ecm_callback(self, data):
        """ecm_callback function."""
        self.ecm_lock.acquire()
        self.ecm_message = data
        self.ecm_lock.release()

    def sas_callback(self, data):
        """sas_callback function."""
        self.sas_lock.acquire()
        self.sas_message = data
        self.sas_lock.release()

    def bcm_callback(self, data):
        """bcm_callback function."""
        self.bcm_lock.acquire()
        self.bcm_message = data
        self.bcm_lock.release()

    def px2_callback(self, data):
        """px2_callback function."""
        self.px2_lock.acquire()
        self.px2_message = data
        self.px2_lock.release()

    def rc_callback(self, data):
        """rc_callback function."""
        self.rc_lock.acquire()
        self.rc_message = data
        self.rc_lock.release()

    def roll(self):
        """ros roll main function.

        Publishes one PX2 control message per tick (50 Hz): remote-control
        commands when rc.enable is set, otherwise the latest PX2 estimate.
        """
        rate = rospy.Rate(50)
        while not rospy.is_shutdown():
            self.rc_lock.acquire()
            rc = self.rc_message
            self.rc_lock.release()
            px2_message = PX2()
            px2_message.Mode = 2
            px2_message.header.stamp = rospy.Time.now()
            if rc.enable:
                px2_message.TarSteeringAngle = rc.steering_angle
                # Mock rc angle
                # NOTE(review): the freshly built px2_message above is
                # replaced here by the PX2 estimate, so the rc steering
                # angle set on the previous line is discarded -- confirm
                # this is the intended behavior.
                self.px2_lock.acquire()
                px2_message = self.px2_message
                self.px2_lock.release()
                px2_message.TarAcce = rc.acceleration
                px2_message.Mode = 2
                # DecToStop == 0 means "decelerating to stop" when the
                # commanded acceleration is negative.
                if px2_message.TarAcce < 0:
                    px2_message.DecToStop = 0
                else:
                    px2_message.DecToStop = 1
            else:
                self.px2_lock.acquire()
                target_angle = self.px2_message.TarSteeringAngle
                target_speed = self.px2_message.TarAcce
                self.px2_lock.release()
                # The sensor reads below are currently unused by the control
                # output; they are kept as a snapshot of vehicle state for
                # future control logic.
                self.ecm_lock.acquire()
                current_speed = self.ecm_message.VehicleSpd
                self.ecm_lock.release()
                self.sas_lock.acquire()
                angle = self.sas_message.SteeringWheelAngle
                angle_sign = self.sas_message.SteeringWheelAngleSign
                if angle_sign == 1:
                    angle = -angle
                self.sas_lock.release()
                self.inspva_lock.acquire()
                longitude = self.inspva_message.longitude
                latitude = self.inspva_message.latitude
                heading = self.inspva_message.azimuth
                self.inspva_lock.release()
                # BUG FIX: the original assigned the undefined name
                # `target_acce` here, raising NameError the first time this
                # branch ran; the value read under px2_lock is target_speed.
                px2_message.TarAcce = target_speed
                px2_message.TarSteeringAngle = target_angle
            self.px2_pub.publish(px2_message)
            rate.sleep()


if __name__ == '__main__':
    rospy.init_node('car_act', anonymous=True)
    car_act = CarAct()
    car_act.roll()
    rospy.spin()
charbeljc/OCB
refs/heads/8.0
openerp/api.py
32
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2013-2014 OpenERP (<http://www.openerp.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

""" This module provides the elements for managing two different API styles,
    namely the "traditional" and "record" styles.

    In the "traditional" style, parameters like the database cursor, user id,
    context dictionary and record ids (usually denoted as ``cr``, ``uid``,
    ``context``, ``ids``) are passed explicitly to all methods. In the "record"
    style, those parameters are hidden into model instances, which gives it a
    more object-oriented feel.

    For instance, the statements::

        model = self.pool.get(MODEL)
        ids = model.search(cr, uid, DOMAIN, context=context)
        for rec in model.browse(cr, uid, ids, context=context):
            print rec.name
        model.write(cr, uid, ids, VALUES, context=context)

    may also be written as::

        env = Env(cr, uid, context)         # cr, uid, context wrapped in env
        recs = env[MODEL]                   # retrieve an instance of MODEL
        recs = recs.search(DOMAIN)          # search returns a recordset
        for rec in recs:                    # iterate over the records
            print rec.name
        recs.write(VALUES)                  # update all records in recs

    Methods written in the "traditional" style are automatically decorated,
    following some heuristics based on parameter names.
"""

__all__ = [
    'Environment',
    'Meta', 'guess', 'noguess',
    'model', 'multi', 'one',
    'cr', 'cr_context', 'cr_uid', 'cr_uid_context',
    'cr_uid_id', 'cr_uid_id_context', 'cr_uid_ids', 'cr_uid_ids_context',
    'constrains', 'depends', 'onchange', 'returns',
]

import logging
import operator

from inspect import currentframe, getargspec
from collections import defaultdict, MutableMapping
from contextlib import contextmanager
from pprint import pformat
from weakref import WeakSet
from werkzeug.local import Local, release_local

from openerp.tools import frozendict, classproperty

_logger = logging.getLogger(__name__)

# The following attributes are used, and reflected on wrapping methods:
#  - method._constrains: set by @constrains, specifies constraint dependencies
#  - method._depends: set by @depends, specifies compute dependencies
#  - method._returns: set by @returns, specifies return model
#  - method._onchange: set by @onchange, specifies onchange fields
#  - method.clear_cache: set by @ormcache, used to clear the cache
#
# On wrapping method only:
#  - method._api: decorator function, used for re-applying decorator
#  - method._orig: original method
#
WRAPPED_ATTRS = ('__module__', '__name__', '__doc__', '_constrains',
                 '_depends', '_onchange', '_returns', 'clear_cache')

INHERITED_ATTRS = ('_returns',)


class Meta(type):
    """ Metaclass that automatically decorates traditional-style methods by
        guessing their API. It also implements the inheritance of the
        :func:`returns` decorators.
    """

    def __new__(meta, name, bases, attrs):
        # dummy parent class to catch overridden methods decorated with 'returns'
        parent = type.__new__(meta, name, bases, {})

        for key, value in attrs.items():
            if not key.startswith('__') and callable(value):
                # make the method inherit from decorators
                value = propagate(getattr(parent, key, None), value)

                # guess calling convention if none is given
                if not hasattr(value, '_api'):
                    try:
                        value = guess(value)
                    except TypeError:
                        # guess() introspects the signature; builtins and
                        # C-level callables without one are left untouched
                        pass

                attrs[key] = value

        return type.__new__(meta, name, bases, attrs)


# trivial identity function, usable as a neutral value converter
identity = lambda x: x


def decorate(method, attr, value):
    """ Decorate `method` or its original method. """
    # Setting the attribute on a wrapper would be lost on re-decoration, so
    # always decorate the innermost original method and rebuild the wrapper.
    if getattr(method, '_api', False):
        # decorate the original method, and re-apply the api decorator
        setattr(method._orig, attr, value)
        return method._api(method._orig)
    else:
        # simply decorate the method itself
        setattr(method, attr, value)
        return method


def propagate(from_method, to_method):
    """ Propagate decorators from `from_method` to `to_method`, and return the
        resulting method.
    """
    if from_method:
        for attr in INHERITED_ATTRS:
            if hasattr(from_method, attr) and not hasattr(to_method, attr):
                to_method = decorate(to_method, attr, getattr(from_method, attr))
    return to_method


def constrains(*args):
    """ Decorates a constraint checker. Each argument must be a field name
        used in the check::

            @api.one
            @api.constrains('name', 'description')
            def _check_description(self):
                if self.name == self.description:
                    raise ValidationError("Fields name and description must be different")

        Invoked on the records on which one of the named fields has been modified.

        Should raise :class:`~openerp.exceptions.ValidationError` if the
        validation failed.
    """
    return lambda method: decorate(method, '_constrains', args)


def onchange(*args):
    """ Return a decorator to decorate an onchange method for given fields.
        Each argument must be a field name::

            @api.onchange('partner_id')
            def _onchange_partner(self):
                self.message = "Dear %s" % (self.partner_id.name or "")

        In the form views where the field appears, the method will be called
        when one of the given fields is modified. The method is invoked on a
        pseudo-record that contains the values present in the form. Field
        assignments on that record are automatically sent back to the client.
    """
    return lambda method: decorate(method, '_onchange', args)


def depends(*args):
    """ Return a decorator that specifies the field dependencies of a "compute"
        method (for new-style function fields). Each argument must be a string
        that consists in a dot-separated sequence of field names::

            pname = fields.Char(compute='_compute_pname')

            @api.one
            @api.depends('partner_id.name', 'partner_id.is_company')
            def _compute_pname(self):
                if self.partner_id.is_company:
                    self.pname = (self.partner_id.name or "").upper()
                else:
                    self.pname = self.partner_id.name

        One may also pass a single function as argument. In that case, the
        dependencies are given by calling the function with the field's model.
    """
    if args and callable(args[0]):
        args = args[0]
    elif any('id' in arg.split('.') for arg in args):
        raise NotImplementedError("Compute method cannot depend on field 'id'.")
    return lambda method: decorate(method, '_depends', args)


def returns(model, downgrade=None):
    """ Return a decorator for methods that return instances of `model`.

        :param model: a model name, or ``'self'`` for the current model

        :param downgrade: a function `downgrade(value)` to convert the
            record-style `value` to a traditional-style output

        The decorator adapts the method output to the api style: `id`, `ids` or
        ``False`` for the traditional style, and recordset for the record style::

            @model
            @returns('res.partner')
            def find_partner(self, arg):
                ...     # return some record

            # output depends on call style: traditional vs record style
            partner_id = model.find_partner(cr, uid, arg, context=context)

            # recs = model.browse(cr, uid, ids, context)
            partner_record = recs.find_partner(arg)

        Note that the decorated method must satisfy that convention.

        Those decorators are automatically *inherited*: a method that overrides
        a decorated existing method will be decorated with the same
        ``@returns(model)``.
    """
    return lambda method: decorate(method, '_returns', (model, downgrade))


def make_wrapper(decorator, method, old_api, new_api):
    """ Return a wrapper method for `method`. """
    def wrapper(self, *args, **kwargs):
        # avoid hasattr(self, '_ids') because __getattr__() is overridden
        if '_ids' in self.__dict__:
            # `self` is a recordset -> record-style call
            return new_api(self, *args, **kwargs)
        else:
            # `self` is the old-style model -> traditional call
            return old_api(self, *args, **kwargs)

    # propagate specific openerp attributes from method to wrapper
    for attr in WRAPPED_ATTRS:
        if hasattr(method, attr):
            setattr(wrapper, attr, getattr(method, attr))
    wrapper._api = decorator
    wrapper._orig = method

    return wrapper


def get_downgrade(method):
    """ Return a function `downgrade(value)` that adapts `value` from
        record-style to traditional-style, following the convention of `method`.
    """
    spec = getattr(method, '_returns', None)
    if spec:
        model, downgrade = spec
        return downgrade or (lambda value: value.ids)
    else:
        return lambda value: value


def get_upgrade(method):
    """ Return a function `upgrade(self, value)` that adapts `value` from
        traditional-style to record-style, following the convention of `method`.
    """
    spec = getattr(method, '_returns', None)
    if spec:
        model, downgrade = spec
        if model == 'self':
            return lambda self, value: self.browse(value)
        else:
            return lambda self, value: self.env[model].browse(value)
    else:
        return lambda self, value: value


def get_aggregate(method):
    """ Return a function `aggregate(self, value)` that aggregates record-style
        `value` for a method decorated with ``@one``.
    """
    spec = getattr(method, '_returns', None)
    if spec:
        # value is a list of instances, concatenate them
        model, downgrade = spec
        if model == 'self':
            return lambda self, value: sum(value, self.browse())
        else:
            return lambda self, value: sum(value, self.env[model].browse())
    else:
        return lambda self, value: value


def get_context_split(method):
    """ Return a function `split` that extracts the context from a pair of
        positional and keyword arguments::

            context, args, kwargs = split(args, kwargs)
    """
    # position of 'context' if passed positionally: last declared argument
    # (minus 'self')
    pos = len(getargspec(method).args) - 1

    def split(args, kwargs):
        if pos < len(args):
            return args[pos], args[:pos], kwargs
        else:
            return kwargs.pop('context', None), args, kwargs

    return split


def model(method):
    """ Decorate a record-style method where `self` is a recordset, but its
        contents is not relevant, only the model is. Such a method::

            @api.model
            def method(self, args):
                ...

        may be called in both record and traditional styles, like::

            # recs = model.browse(cr, uid, ids, context)
            recs.method(args)

            model.method(cr, uid, args, context=context)

        Notice that no `ids` are passed to the method in the traditional style.
    """
    split = get_context_split(method)
    downgrade = get_downgrade(method)

    def old_api(self, cr, uid, *args, **kwargs):
        context, args, kwargs = split(args, kwargs)
        recs = self.browse(cr, uid, [], context)
        result = method(recs, *args, **kwargs)
        return downgrade(result)

    return make_wrapper(model, method, old_api, method)


def multi(method):
    """ Decorate a record-style method where `self` is a recordset. The method
        typically defines an operation on records. Such a method::

            @api.multi
            def method(self, args):
                ...

        may be called in both record and traditional styles, like::

            # recs = model.browse(cr, uid, ids, context)
            recs.method(args)

            model.method(cr, uid, ids, args, context=context)
    """
    split = get_context_split(method)
    downgrade = get_downgrade(method)

    def old_api(self, cr, uid, ids, *args, **kwargs):
        context, args, kwargs = split(args, kwargs)
        recs = self.browse(cr, uid, ids, context)
        result = method(recs, *args, **kwargs)
        return downgrade(result)

    return make_wrapper(multi, method, old_api, method)


def one(method):
    """ Decorate a record-style method where `self` is expected to be a
        singleton instance. The decorated method automatically loops on records,
        and makes a list with the results. In case the method is decorated with
        @returns, it concatenates the resulting instances. Such a method::

            @api.one
            def method(self, args):
                return self.name

        may be called in both record and traditional styles, like::

            # recs = model.browse(cr, uid, ids, context)
            names = recs.method(args)

            names = model.method(cr, uid, ids, args, context=context)
    """
    split = get_context_split(method)
    downgrade = get_downgrade(method)
    aggregate = get_aggregate(method)

    def old_api(self, cr, uid, ids, *args, **kwargs):
        context, args, kwargs = split(args, kwargs)
        recs = self.browse(cr, uid, ids, context)
        result = new_api(recs, *args, **kwargs)
        return downgrade(result)

    def new_api(self, *args, **kwargs):
        # loop over records, collecting one result per record
        result = [method(rec, *args, **kwargs) for rec in self]
        return aggregate(self, result)

    return make_wrapper(one, method, old_api, new_api)


def cr(method):
    """ Decorate a traditional-style method that takes `cr` as a parameter.
        Such a method may be called in both record and traditional styles, like::

            # recs = model.browse(cr, uid, ids, context)
            recs.method(args)

            model.method(cr, args)
    """
    upgrade = get_upgrade(method)

    def new_api(self, *args, **kwargs):
        cr, uid, context = self.env.args
        result = method(self._model, cr, *args, **kwargs)
        return upgrade(self, result)

    return make_wrapper(cr, method, method, new_api)


def cr_context(method):
    """ Decorate a traditional-style method that takes `cr`, `context` as parameters. """
    upgrade = get_upgrade(method)

    def new_api(self, *args, **kwargs):
        cr, uid, context = self.env.args
        kwargs['context'] = context
        result = method(self._model, cr, *args, **kwargs)
        return upgrade(self, result)

    return make_wrapper(cr_context, method, method, new_api)


def cr_uid(method):
    """ Decorate a traditional-style method that takes `cr`, `uid` as parameters. """
    upgrade = get_upgrade(method)

    def new_api(self, *args, **kwargs):
        cr, uid, context = self.env.args
        result = method(self._model, cr, uid, *args, **kwargs)
        return upgrade(self, result)

    return make_wrapper(cr_uid, method, method, new_api)


def cr_uid_context(method):
    """ Decorate a traditional-style method that takes `cr`, `uid`, `context` as
        parameters. Such a method may be called in both record and traditional
        styles, like::

            # recs = model.browse(cr, uid, ids, context)
            recs.method(args)

            model.method(cr, uid, args, context=context)
    """
    upgrade = get_upgrade(method)

    def new_api(self, *args, **kwargs):
        cr, uid, context = self.env.args
        kwargs['context'] = context
        result = method(self._model, cr, uid, *args, **kwargs)
        return upgrade(self, result)

    return make_wrapper(cr_uid_context, method, method, new_api)


def cr_uid_id(method):
    """ Decorate a traditional-style method that takes `cr`, `uid`, `id` as
        parameters. Such a method may be called in both record and traditional
        styles. In the record style, the method automatically loops on records.
    """
    upgrade = get_upgrade(method)

    def new_api(self, *args, **kwargs):
        cr, uid, context = self.env.args
        result = [method(self._model, cr, uid, id, *args, **kwargs) for id in self.ids]
        return upgrade(self, result)

    return make_wrapper(cr_uid_id, method, method, new_api)


def cr_uid_id_context(method):
    """ Decorate a traditional-style method that takes `cr`, `uid`, `id`,
        `context` as parameters. Such a method::

            @api.cr_uid_id
            def method(self, cr, uid, id, args, context=None):
                ...

        may be called in both record and traditional styles, like::

            # rec = model.browse(cr, uid, id, context)
            rec.method(args)

            model.method(cr, uid, id, args, context=context)
    """
    upgrade = get_upgrade(method)

    def new_api(self, *args, **kwargs):
        cr, uid, context = self.env.args
        kwargs['context'] = context
        result = [method(self._model, cr, uid, id, *args, **kwargs) for id in self.ids]
        return upgrade(self, result)

    return make_wrapper(cr_uid_id_context, method, method, new_api)


def cr_uid_ids(method):
    """ Decorate a traditional-style method that takes `cr`, `uid`, `ids` as
        parameters. Such a method may be called in both record and traditional
        styles.
    """
    upgrade = get_upgrade(method)

    def new_api(self, *args, **kwargs):
        cr, uid, context = self.env.args
        result = method(self._model, cr, uid, self.ids, *args, **kwargs)
        return upgrade(self, result)

    return make_wrapper(cr_uid_ids, method, method, new_api)


def cr_uid_ids_context(method):
    """ Decorate a traditional-style method that takes `cr`, `uid`, `ids`,
        `context` as parameters. Such a method::

            @api.cr_uid_ids_context
            def method(self, cr, uid, ids, args, context=None):
                ...

        may be called in both record and traditional styles, like::

            # recs = model.browse(cr, uid, ids, context)
            recs.method(args)

            model.method(cr, uid, ids, args, context=context)

        It is generally not necessary, see :func:`guess`.
    """
    upgrade = get_upgrade(method)

    def new_api(self, *args, **kwargs):
        cr, uid, context = self.env.args
        kwargs['context'] = context
        result = method(self._model, cr, uid, self.ids, *args, **kwargs)
        return upgrade(self, result)

    return make_wrapper(cr_uid_ids_context, method, method, new_api)


def v7(method_v7):
    """ Decorate a method that supports the old-style api only. A new-style api
        may be provided by redefining a method with the same name and decorated
        with :func:`~.v8`::

            @api.v7
            def foo(self, cr, uid, ids, context=None):
                ...

            @api.v8
            def foo(self):
                ...

        Note that the wrapper method uses the docstring of the first method.
    """
    # retrieve method_v8 from the caller's frame
    frame = currentframe().f_back
    method = frame.f_locals.get(method_v7.__name__)
    method_v8 = getattr(method, '_v8', method)

    wrapper = make_wrapper(v7, method_v7, method_v7, method_v8)
    wrapper._v7 = method_v7
    wrapper._v8 = method_v8
    return wrapper


def v8(method_v8):
    """ Decorate a method that supports the new-style api only. An old-style api
        may be provided by redefining a method with the same name and decorated
        with :func:`~.v7`::

            @api.v8
            def foo(self):
                ...

            @api.v7
            def foo(self, cr, uid, ids, context=None):
                ...

        Note that the wrapper method uses the docstring of the first method.
    """
    # retrieve method_v7 from the caller's frame
    frame = currentframe().f_back
    method = frame.f_locals.get(method_v8.__name__)
    method_v7 = getattr(method, '_v7', method)

    wrapper = make_wrapper(v8, method_v8, method_v7, method_v8)
    wrapper._v7 = method_v7
    wrapper._v8 = method_v8
    return wrapper


def noguess(method):
    """ Decorate a method to prevent any effect from :func:`guess`. """
    method._api = False
    return method


def guess(method):
    """ Decorate `method` to make it callable in both traditional and record
        styles. This decorator is applied automatically by the model's
        metaclass, and has no effect on already-decorated methods.

        The API style is determined by heuristics on the parameter names: ``cr``
        or ``cursor`` for the cursor, ``uid`` or ``user`` for the user id,
        ``id`` or ``ids`` for a list of record ids, and ``context`` for the
        context dictionary. If a traditional API is recognized, one of the
        decorators :func:`cr`, :func:`cr_context`, :func:`cr_uid`,
        :func:`cr_uid_context`, :func:`cr_uid_id`, :func:`cr_uid_id_context`,
        :func:`cr_uid_ids`, :func:`cr_uid_ids_context` is applied on the method.

        Method calls are considered traditional style when their first parameter
        is a database cursor.
    """
    if hasattr(method, '_api'):
        return method

    # introspection on argument names to determine api style
    args, vname, kwname, defaults = getargspec(method)
    names = tuple(args) + (None,) * 4

    if names[0] == 'self':
        if names[1] in ('cr', 'cursor'):
            if names[2] in ('uid', 'user'):
                if names[3] == 'ids':
                    if 'context' in names or kwname:
                        return cr_uid_ids_context(method)
                    else:
                        return cr_uid_ids(method)
                elif names[3] == 'id' or names[3] == 'res_id':
                    if 'context' in names or kwname:
                        return cr_uid_id_context(method)
                    else:
                        return cr_uid_id(method)
                elif 'context' in names or kwname:
                    return cr_uid_context(method)
                else:
                    return cr_uid(method)
            elif 'context' in names:
                return cr_context(method)
            else:
                return cr(method)

    # no wrapping by default
    return noguess(method)


def expected(decorator, func):
    """ Decorate `func` with `decorator` if `func` is not wrapped yet. """
    return decorator(func) if not hasattr(func, '_api') else func


class Environment(object):
    """ An environment wraps data for ORM records:

         - :attr:`cr`, the current database cursor;
         - :attr:`uid`, the current user id;
         - :attr:`context`, the current context dictionary.

        It also provides access to the registry, a cache for records, and a data
        structure to manage recomputations.
    """
    # thread/request-local storage for the set of live environments
    _local = Local()

    @classproperty
    def envs(cls):
        return cls._local.environments

    @classmethod
    @contextmanager
    def manage(cls):
        """ Context manager for a set of environments. """
        if hasattr(cls._local, 'environments'):
            # environments already managed by an outer call: do nothing
            yield
        else:
            try:
                cls._local.environments = Environments()
                yield
            finally:
                release_local(cls._local)

    @classmethod
    def reset(cls):
        """ Clear the set of environments.
            This may be useful when recreating a registry inside a transaction.
        """
        cls._local.environments = Environments()

    def __new__(cls, cr, uid, context):
        assert context is not None
        args = (cr, uid, context)

        # if env already exists, return it
        env, envs = None, cls.envs
        for env in envs:
            if env.args == args:
                return env

        # otherwise create environment, and add it in the set
        self = object.__new__(cls)
        self.cr, self.uid, self.context = self.args = (cr, uid, frozendict(context))
        self.registry = RegistryManager.get(cr.dbname)
        self.cache = defaultdict(dict)          # {field: {id: value, ...}, ...}
        self.prefetch = defaultdict(set)        # {model_name: set(id), ...}
        self.computed = defaultdict(set)        # {field: set(id), ...}
        self.dirty = defaultdict(set)           # {record: set(field_name), ...}
        self.all = envs
        envs.add(self)
        return self

    def __getitem__(self, model_name):
        """ return a given model """
        return self.registry[model_name]._browse(self, ())

    def __call__(self, cr=None, user=None, context=None):
        """ Return an environment based on `self` with modified parameters.

            :param cr: optional database cursor to change the current cursor
            :param user: optional user/user id to change the current user
            :param context: optional context dictionary to change the current
                context
        """
        cr = self.cr if cr is None else cr
        uid = self.uid if user is None else int(user)
        context = self.context if context is None else context
        return Environment(cr, uid, context)

    def ref(self, xml_id, raise_if_not_found=True):
        """ return the record corresponding to the given `xml_id` """
        return self['ir.model.data'].xmlid_to_object(xml_id, raise_if_not_found=raise_if_not_found)

    @property
    def user(self):
        """ return the current user (as an instance) """
        # browse as superuser to avoid access-rights issues on res.users
        return self(user=SUPERUSER_ID)['res.users'].browse(self.uid)

    @property
    def lang(self):
        """ return the current language code """
        return self.context.get('lang')

    @contextmanager
    def _do_in_mode(self, mode):
        if self.all.mode:
            # already in a draft mode: keep the outer mode
            yield
        else:
            try:
                self.all.mode = mode
                yield
            finally:
                self.all.mode = False
                self.dirty.clear()

    def do_in_draft(self):
        """ Context-switch to draft mode, where all field updates are done in
            cache only.
        """
        return self._do_in_mode(True)

    @property
    def in_draft(self):
        """ Return whether we are in draft mode. """
        return bool(self.all.mode)

    def do_in_onchange(self):
        """ Context-switch to 'onchange' draft mode, which is a specialized
            draft mode used during execution of onchange methods.
        """
        return self._do_in_mode('onchange')

    @property
    def in_onchange(self):
        """ Return whether we are in 'onchange' draft mode. """
        return self.all.mode == 'onchange'

    def invalidate(self, spec):
        """ Invalidate some fields for some records in the cache of all
            environments.

            :param spec: what to invalidate, a list of `(field, ids)` pair,
                where `field` is a field object, and `ids` is a list of record
                ids or ``None`` (to invalidate all records).
        """
        if not spec:
            return
        for env in list(self.all):
            c = env.cache
            for field, ids in spec:
                if ids is None:
                    # drop the whole per-field cache
                    if field in c:
                        del c[field]
                else:
                    field_cache = c[field]
                    for id in ids:
                        field_cache.pop(id, None)

    def invalidate_all(self):
        """ Clear the cache of all environments. """
        for env in list(self.all):
            env.cache.clear()
            env.prefetch.clear()
            env.computed.clear()
            env.dirty.clear()

    def clear(self):
        """ Clear all record caches, and discard all fields to recompute.
            This may be useful when recovering from a failed ORM operation.
        """
        self.invalidate_all()
        self.all.todo.clear()

    @contextmanager
    def clear_upon_failure(self):
        """ Context manager that clears the environments (caches and fields to
            recompute) upon exception.
        """
        try:
            yield
        except Exception:
            self.clear()
            raise

    def field_todo(self, field):
        """ Check whether `field` must be recomputed, and returns a recordset
            with all records to recompute for `field`.
        """
        if field in self.all.todo:
            return reduce(operator.or_, self.all.todo[field])

    def check_todo(self, field, record):
        """ Check whether `field` must be recomputed on `record`, and if so,
            returns the corresponding recordset to recompute.
        """
        for recs in self.all.todo.get(field, []):
            if recs & record:
                return recs

    def add_todo(self, field, records):
        """ Mark `field` to be recomputed on `records`. """
        recs_list = self.all.todo.setdefault(field, [])
        recs_list.append(records)

    def remove_todo(self, field, records):
        """ Mark `field` as recomputed on `records`. """
        recs_list = [recs - records for recs in self.all.todo.pop(field, [])]
        recs_list = filter(None, recs_list)
        if recs_list:
            self.all.todo[field] = recs_list

    def has_todo(self):
        """ Return whether some fields must be recomputed. """
        return bool(self.all.todo)

    def get_todo(self):
        """ Return a pair `(field, records)` to recompute. """
        for field, recs_list in self.all.todo.iteritems():
            return field, recs_list[0]

    def check_cache(self):
        """ Check the cache consistency. """
        # make a full copy of the cache, and invalidate it
        cache_dump = dict(
            (field, dict(field_cache))
            for field, field_cache in self.cache.iteritems()
        )
        self.invalidate_all()

        # re-fetch the records, and compare with their former cache
        invalids = []
        for field, field_dump in cache_dump.iteritems():
            ids = filter(None, field_dump)
            records = self[field.model_name].browse(ids)
            for record in records:
                try:
                    cached = field_dump[record.id]
                    fetched = record[field.name]
                    if fetched != cached:
                        info = {'cached': cached, 'fetched': fetched}
                        invalids.append((field, record, info))
                except (AccessError, MissingError):
                    # record may legitimately be gone or unreadable now
                    pass

        if invalids:
            raise Warning('Invalid cache for fields\n' + pformat(invalids))

    @property
    def recompute(self):
        return self.all.recompute

    @contextmanager
    def norecompute(self):
        tmp = self.all.recompute
        self.all.recompute = False
        try:
            yield
        finally:
            self.all.recompute = tmp

    @property
    def recompute_old(self):
        return self.all.recompute_old

    def clear_recompute_old(self):
        del self.all.recompute_old[:]


class Environments(object):
    """ A common object for all environments in a request. """
    def __init__(self):
        self.envs = WeakSet()           # weak set of environments
        self.todo = {}                  # recomputations {field: [records]}
        self.mode = False               # flag for draft/onchange
        self.recompute = True
        self.recompute_old = []         # list of old api compute fields to recompute

    def add(self, env):
        """ Add the environment `env`. """
        self.envs.add(env)

    def __iter__(self):
        """ Iterate over environments. """
        return iter(self.envs)


# keep those imports here in order to handle cyclic dependencies correctly
from openerp import SUPERUSER_ID
from openerp.exceptions import Warning, AccessError, MissingError
from openerp.modules.registry import RegistryManager
gangadharkadam/johnerp
refs/heads/develop
erpnext/patches/v4_0/update_custom_print_formats_for_renamed_fields.py
39
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe import re def execute(): # NOTE: sequence is important fields_list = ( ("amount", "base_amount"), ("ref_rate", "price_list_rate"), ("base_ref_rate", "base_price_list_rate"), ("adj_rate", "discount_percentage"), ("export_rate", "rate"), ("basic_rate", "base_rate"), ("export_amount", "amount"), ("reserved_warehouse", "warehouse"), ("import_ref_rate", "price_list_rate"), ("purchase_ref_rate", "base_price_list_rate"), ("discount_rate", "discount_percentage"), ("import_rate", "rate"), ("purchase_rate", "base_rate"), ("import_amount", "amount") ) condition = " or ".join("""html like "%%{}%%" """.format(d[0].replace("_", "\\_")) for d in fields_list if d[0] != "amount") for name, html in frappe.db.sql("""select name, html from `tabPrint Format` where standard = 'No' and ({}) and html not like '%%frappe.%%'""".format(condition)): html = html.replace("wn.", "frappe.") for from_field, to_field in fields_list: html = re.sub(r"\b{}\b".format(from_field), to_field, html) frappe.db.set_value("Print Format", name, "html", html)
vladmm/intellij-community
refs/heads/master
python/lib/Lib/site-packages/django/db/models/sql/expressions.py
323
from django.core.exceptions import FieldError
from django.db.models.fields import FieldDoesNotExist
from django.db.models.sql.constants import LOOKUP_SEP


class SQLEvaluator(object):
    """Walk an ORM expression tree (e.g. ``F()`` combinations) and render it
    as SQL.

    The constructor runs the *prepare* visitor pass, which resolves each leaf
    field reference against the query (filling ``self.cols``); ``as_sql()``
    then runs the *evaluate* pass to produce the final ``(sql, params)`` pair.
    """

    def __init__(self, expression, query, allow_joins=True):
        self.expression = expression
        self.opts = query.get_meta()
        self.cols = {}

        self.contains_aggregate = False
        self.expression.prepare(self, query, allow_joins)

    def prepare(self):
        return self

    def as_sql(self, qn, connection):
        """Render the prepared expression; `qn` is the quote-name function."""
        return self.expression.evaluate(self, qn, connection)

    def relabel_aliases(self, change_map):
        """Rewrite table aliases in resolved columns after query relabeling."""
        for node, col in self.cols.items():
            if hasattr(col, "relabel_aliases"):
                col.relabel_aliases(change_map)
            else:
                self.cols[node] = (change_map.get(col[0], col[0]), col[1])

    #####################################################
    # Visitor methods for initial expression preparation #
    #####################################################

    def prepare_node(self, node, query, allow_joins):
        for child in node.children:
            if hasattr(child, 'prepare'):
                child.prepare(self, query, allow_joins)

    def prepare_leaf(self, node, query, allow_joins):
        """Resolve a leaf field reference to either an aggregate or a
        ``(table_alias, column)`` pair, stored in ``self.cols``.

        :raises FieldError: if joins are forbidden but required, or if the
            name cannot be resolved to a field.
        """
        if not allow_joins and LOOKUP_SEP in node.name:
            raise FieldError("Joined field references are not permitted in this query")

        field_list = node.name.split(LOOKUP_SEP)
        if (len(field_list) == 1 and
            node.name in query.aggregate_select.keys()):
            self.contains_aggregate = True
            self.cols[node] = query.aggregate_select[node.name]
        else:
            try:
                field, source, opts, join_list, last, _ = query.setup_joins(
                    field_list, query.get_meta(),
                    query.get_initial_alias(), False)
                col, _, join_list = query.trim_joins(source, join_list, last, False)
                self.cols[node] = (join_list[-1], col)
            except FieldDoesNotExist:
                # Fix: the original formatted this message with `self.name`,
                # but the evaluator has no `name` attribute (the field name
                # lives on the leaf `node`), so the intended FieldError was
                # masked by an AttributeError.
                raise FieldError("Cannot resolve keyword %r into field. "
                                 "Choices are: %s" % (node.name,
                                                      [f.name for f in self.opts.fields]))

    ##################################################
    # Visitor methods for final expression evaluation #
    ##################################################

    def evaluate_node(self, node, qn, connection):
        """Render an inner node by combining its children's SQL with the
        node's connector (e.g. '+', '-')."""
        expressions = []
        expression_params = []
        for child in node.children:
            if hasattr(child, 'evaluate'):
                sql, params = child.evaluate(self, qn, connection)
            else:
                # plain Python value: render a placeholder, pass as a param
                sql, params = '%s', (child,)

            # parenthesize composite sub-expressions to preserve precedence
            if len(getattr(child, 'children', [])) > 1:
                format = '(%s)'
            else:
                format = '%s'

            if sql:
                expressions.append(format % sql)
                expression_params.extend(params)

        return connection.ops.combine_expression(node.connector, expressions), expression_params

    def evaluate_leaf(self, node, qn, connection):
        col = self.cols[node]
        if hasattr(col, 'as_sql'):
            return col.as_sql(qn, connection), ()
        else:
            return '%s.%s' % (qn(col[0]), qn(col[1])), ()

    def evaluate_date_modifier_node(self, node, qn, connection):
        """Render date arithmetic (expression +/- timedelta); a zero delta
        degenerates to the plain expression."""
        timedelta = node.children.pop()
        sql, params = self.evaluate_node(node, qn, connection)

        if timedelta.days == 0 and timedelta.seconds == 0 and \
                timedelta.microseconds == 0:
            return sql, params

        return connection.ops.date_interval_sql(sql, node.connector, timedelta), params
tszym/ansible
refs/heads/devel
hacking/report.py
46
#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
"""A tool to aggregate data about Ansible source and testing into a sqlite DB for reporting."""

from __future__ import (absolute_import, print_function)

import argparse
import os
import requests
import sqlite3
import sys

# sqlite database file regenerated from scratch by `report.py populate`
DATABASE_PATH = os.path.expanduser('~/.ansible/report.db')
BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')) + '/'
ANSIBLE_PATH = os.path.join(BASE_PATH, 'lib')
ANSIBLE_TEST_PATH = os.path.join(BASE_PATH, 'test/runner')

# make the in-tree ansible and ansible-test packages importable below
if ANSIBLE_PATH not in sys.path:
    sys.path.insert(0, ANSIBLE_PATH)

if ANSIBLE_TEST_PATH not in sys.path:
    sys.path.insert(0, ANSIBLE_TEST_PATH)

from ansible.parsing.metadata import extract_metadata

from lib.target import walk_integration_targets


def main():
    """Entry point: parse the CLI and dispatch to the chosen sub-command."""
    os.chdir(BASE_PATH)
    args = parse_args()
    args.func()


def parse_args():
    """Build the argument parser (populate/query sub-commands) and parse argv."""
    try:
        import argcomplete
    except ImportError:
        argcomplete = None

    parser = argparse.ArgumentParser()

    subparsers = parser.add_subparsers(metavar='COMMAND')
    subparsers.required = True  # work-around for python 3 bug which makes subparsers optional

    populate = subparsers.add_parser('populate',
                                     help='populate report database')
    populate.set_defaults(func=populate_database)

    query = subparsers.add_parser('query',
                                  help='query report database')
    query.set_defaults(func=query_database)

    if argcomplete:
        argcomplete.autocomplete(parser)

    args = parser.parse_args()

    return args


def query_database():
    """Open an interactive sqlite3 shell on the report database."""
    if not os.path.exists(DATABASE_PATH):
        exit('error: Database not found. Did you run `report.py populate` first?')

    # replace the current process with the sqlite3 CLI
    os.execvp('sqlite3', ('sqlite3', DATABASE_PATH))


def populate_database():
    """Rebuild all tables in the report database."""
    populate_modules()
    populate_coverage()
    populate_integration_targets()


def populate_modules():
    """Scan module sources, extract ANSIBLE_METADATA, and fill the
    `modules` and `module_statuses` tables."""
    module_dir = os.path.join(BASE_PATH, 'lib/ansible/modules/')

    modules_rows = []
    module_statuses_rows = []

    for root, dir_names, file_names in os.walk(module_dir):
        for file_name in file_names:
            module, extension = os.path.splitext(file_name)

            if module == '__init__' or extension != '.py':
                continue

            # deprecated modules are prefixed with '_'; strip it for the name
            if module.startswith('_'):
                module = module[1:]

            namespace = os.path.join(root.replace(module_dir, '')).replace('/', '.')

            path = os.path.join(root, file_name)

            with open(path, 'rb') as module_fd:
                module_data = module_fd.read()

            result = extract_metadata(module_data=module_data)

            metadata = result[0]

            if not metadata:
                # async_wrapper is a special helper with no metadata by design
                if module == 'async_wrapper':
                    continue

                raise Exception('no metadata for: %s' % path)

            modules_rows.append(dict(
                module=module,
                namespace=namespace,
                path=path.replace(BASE_PATH, ''),
                supported_by=metadata['supported_by'],
            ))

            # one row per status value (a module can have several)
            for status in metadata['status']:
                module_statuses_rows.append(dict(
                    module=module,
                    status=status,
                ))

    populate_data(dict(
        modules=dict(
            rows=modules_rows,
            schema=(
                ('module', 'TEXT'),
                ('namespace', 'TEXT'),
                ('path', 'TEXT'),
                ('supported_by', 'TEXT'),
            )),
        module_statuses=dict(
            rows=module_statuses_rows,
            schema=(
                ('module', 'TEXT'),
                ('status', 'TEXT'),
            )),
    ))


def populate_coverage():
    """Fetch per-file coverage from the codecov.io API and fill the
    `coverage` table."""
    response = requests.get('https://codecov.io/api/gh/ansible/ansible/tree/devel/?src=extension')
    data = response.json()
    files = data['commit']['report']['files']
    coverage_rows = []

    for path, data in files.items():
        # 't' holds the coverage totals for the file; single-letter keys are
        # the codecov API's report format (c=coverage, n=lines, h=hit, ...)
        report = data['t']
        coverage_rows.append(dict(
            path=path,
            coverage=float(report['c']),
            lines=report['n'],
            hit=report['h'],
            partial=report['p'],
            missed=report['m'],
        ))

    populate_data(dict(
        coverage=dict(
            rows=coverage_rows,
            schema=(
                ('path', 'TEXT'),
                ('coverage', 'REAL'),
                ('lines', 'INTEGER'),
                ('hit', 'INTEGER'),
                ('partial', 'INTEGER'),
                ('missed', 'INTEGER'),
            )),
    ))


def populate_integration_targets():
    """Walk the integration test targets and fill the target, alias and
    module mapping tables."""
    targets = list(walk_integration_targets())

    integration_targets_rows = [dict(
        target=target.name,
        type=target.type,
        path=target.path,
        script_path=target.script_path,
    ) for target in targets]

    integration_target_aliases_rows = [dict(
        target=target.name,
        alias=alias,
    ) for target in targets for alias in target.aliases]

    integration_target_modules_rows = [dict(
        target=target.name,
        module=module,
    ) for target in targets for module in target.modules]

    populate_data(dict(
        integration_targets=dict(
            rows=integration_targets_rows,
            schema=(
                ('target', 'TEXT'),
                ('type', 'TEXT'),
                ('path', 'TEXT'),
                ('script_path', 'TEXT'),
            )),
        integration_target_aliases=dict(
            rows=integration_target_aliases_rows,
            schema=(
                ('target', 'TEXT'),
                ('alias', 'TEXT'),
            )),
        integration_target_modules=dict(
            rows=integration_target_modules_rows,
            schema=(
                ('target', 'TEXT'),
                ('module', 'TEXT'),
            )),
    ))


def create_table(cursor, name, columns):
    """(Re)create table `name` with the given ((name, type), ...) columns.

    Identifiers are interpolated directly; they come from the hard-coded
    schemas above, never from user input.
    """
    schema = ', '.join('%s %s' % column for column in columns)

    cursor.execute('DROP TABLE IF EXISTS %s' % name)
    cursor.execute('CREATE TABLE %s (%s)' % (name, schema))


def populate_table(cursor, rows, name, columns):
    """Create table `name` and insert `rows` (dicts) using named parameters."""
    create_table(cursor, name, columns)

    values = ', '.join([':%s' % column[0] for column in columns])

    for row in rows:
        cursor.execute('INSERT INTO %s VALUES (%s)' % (name, values), row)


def populate_data(data):
    """Write a {table_name: {rows, schema}} mapping into the database in one
    transaction."""
    connection = sqlite3.connect(DATABASE_PATH)
    cursor = connection.cursor()

    for table in data:
        populate_table(cursor, data[table]['rows'], table, data[table]['schema'])

    connection.commit()
    connection.close()


if __name__ == '__main__':
    main()
aiguofer/bokeh
refs/heads/master
examples/compat/mpl_plot.py
11
import matplotlib.pyplot as plt
import numpy as np

from bokeh import mpl
from bokeh.plotting import output_file, show

# Sample data: two full periods of sine/cosine sampled at 100 points.
x = np.linspace(-2 * np.pi, 2 * np.pi, 100)
y = np.sin(x)
z = np.cos(x)

# Draw the figure with plain matplotlib first; Bokeh converts it below.
plt.plot(x, y, "r-", marker='o')
plt.title("Matplotlib Figure in Bokeh")

# dashed lines work
plt.plot(x, z, "g-x", linestyle="-.")

# Target HTML file for the converted Bokeh plot.
output_file("mpl_plot.html", title="mpl_plot.py example")

# Convert the current matplotlib figure to a Bokeh plot and open it.
show(mpl.to_bokeh())
shvets/etvnet-plex-plugin
refs/heads/master
ssh_client.py
3
#!/usr/bin/python from StringIO import StringIO import paramiko import getpass # setup logging #paramiko.util.log_to_file('demo_simple.log') class SshClient: "A wrapper of paramiko.SSHClient" TIMEOUT = 4 def __init__(self, host, port, username, password, key=None, passphrase=None): self.username = username self.password = password self.ssh = paramiko.SSHClient() # ssh.load_system_host_keys() self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # ssh.set_missing_host_key_policy(paramiko.WarningPolicy()) if key is not None: key = paramiko.RSAKey.from_private_key(StringIO(key), password=passphrase) self.ssh.connect(host, port, username=username, password=password, pkey=key, timeout=self.TIMEOUT) def close(self): if self.ssh is not None: self.ssh.close() self.ssh = None def execute(self, command, sudo=False): feed_password = False if sudo and self.username != "root": command = "%s" % command feed_password = self.password is not None and len(self.password) > 0 stdin, stdout, stderr = self.ssh.exec_command(command) if feed_password: stdin.write(self.password + "\n") stdin.flush() return { 'out': stdout.readlines(), 'err': stderr.readlines(), 'retval': stdout.channel.recv_exit_status() } if __name__ == "__main__": username = 'alex' host = '10.0.1.37' password = getpass.getpass('Password for %s@%s: ' % (username, host)) client = SshClient(host=host, port=22, username=username, password=password) try: ret = client.execute('dmesg', sudo=True) print " ".join(ret["out"]), " E ".join(ret["err"]), ret["retval"] finally: client.close()
YuriyIlyin/ansible-modules-core
refs/heads/devel
cloud/amazon/ec2_ami_find.py
32
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: ec2_ami_find version_added: 2.0 short_description: Searches for AMIs to obtain the AMI ID and other information description: - Returns list of matching AMIs with AMI ID, along with other useful information - Can search AMIs with different owners - Can search by matching tag(s), by AMI name and/or other criteria - Results can be sorted and sliced author: "Tom Bamford (@tombamford)" notes: - This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com. - See the example below for a suggestion of how to search by distro/release. options: region: description: - The AWS region to use. required: true aliases: [ 'aws_region', 'ec2_region' ] owner: description: - Search AMIs owned by the specified owner - Can specify an AWS account ID, or one of the special IDs 'self', 'amazon' or 'aws-marketplace' - If not specified, all EC2 AMIs in the specified region will be searched. - You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one character. You can escape special characters using a backslash (\) before the character. 
For example, a value of \*amazon\?\\ searches for the literal string *amazon?\. required: false default: null ami_id: description: - An AMI ID to match. default: null required: false ami_tags: description: - A hash/dictionary of tags to match for the AMI. default: null required: false architecture: description: - An architecture type to match (e.g. x86_64). default: null required: false hypervisor: description: - A hypervisor type type to match (e.g. xen). default: null required: false is_public: description: - Whether or not the image(s) are public. choices: ['yes', 'no'] default: null required: false name: description: - An AMI name to match. default: null required: false platform: description: - Platform type to match. default: null required: false sort: description: - Optional attribute which with to sort the results. - If specifying 'tag', the 'tag_name' parameter is required. choices: ['name', 'description', 'tag'] default: null required: false sort_tag: description: - Tag name with which to sort results. - Required when specifying 'sort=tag'. default: null required: false sort_order: description: - Order in which to sort results. - Only used when the 'sort' parameter is specified. choices: ['ascending', 'descending'] default: 'ascending' required: false sort_start: description: - Which result to start with (when sorting). - Corresponds to Python slice notation. default: null required: false sort_end: description: - Which result to end with (when sorting). - Corresponds to Python slice notation. default: null required: false state: description: - AMI state to match. default: 'available' required: false virtualization_type: description: - Virtualization type to match (e.g. hvm). default: null required: false no_result_action: description: - What to do when no results are found. 
- "'success' reports success and returns an empty array" - "'fail' causes the module to report failure" choices: ['success', 'fail'] default: 'success' required: false requirements: - "python >= 2.6" - boto ''' EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. # Search for the AMI tagged "project:website" - ec2_ami_find: owner: self ami_tags: project: website no_result_action: fail register: ami_find # Search for the latest Ubuntu 14.04 AMI - ec2_ami_find: name: "ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*" owner: 099720109477 sort: name sort_order: descending sort_end: 1 register: ami_find # Launch an EC2 instance - ec2: image: "{{ ami_find.results[0].ami_id }}" instance_type: m3.medium key_name: mykey wait: yes ''' try: import boto.ec2 HAS_BOTO=True except ImportError: HAS_BOTO=False import json def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( region = dict(required=True, aliases = ['aws_region', 'ec2_region']), owner = dict(required=False, default=None), ami_id = dict(required=False), ami_tags = dict(required=False, type='dict', aliases = ['search_tags', 'image_tags']), architecture = dict(required=False), hypervisor = dict(required=False), is_public = dict(required=False), name = dict(required=False), platform = dict(required=False), sort = dict(required=False, default=None, choices=['name', 'description', 'tag']), sort_tag = dict(required=False), sort_order = dict(required=False, default='ascending', choices=['ascending', 'descending']), sort_start = dict(required=False), sort_end = dict(required=False), state = dict(required=False, default='available'), virtualization_type = dict(required=False), no_result_action = dict(required=False, default='success', choices = ['success', 'fail']), ) ) module = AnsibleModule( argument_spec=argument_spec, ) if not HAS_BOTO: module.fail_json(msg='boto required for this module, install via pip or your package manager') ami_id = 
module.params.get('ami_id') ami_tags = module.params.get('ami_tags') architecture = module.params.get('architecture') hypervisor = module.params.get('hypervisor') is_public = module.params.get('is_public') name = module.params.get('name') owner = module.params.get('owner') platform = module.params.get('platform') sort = module.params.get('sort') sort_tag = module.params.get('sort_tag') sort_order = module.params.get('sort_order') sort_start = module.params.get('sort_start') sort_end = module.params.get('sort_end') state = module.params.get('state') virtualization_type = module.params.get('virtualization_type') no_result_action = module.params.get('no_result_action') filter = {'state': state} if ami_id: filter['image_id'] = ami_id if ami_tags: for tag in ami_tags: filter['tag:'+tag] = ami_tags[tag] if architecture: filter['architecture'] = architecture if hypervisor: filter['hypervisor'] = hypervisor if is_public: filter['is_public'] = is_public if name: filter['name'] = name if platform: filter['platform'] = platform if virtualization_type: filter['virtualization_type'] = virtualization_type ec2 = ec2_connect(module) images_result = ec2.get_all_images(owners=owner, filters=filter) if no_result_action == 'fail' and len(images_result) == 0: module.fail_json(msg="No AMIs matched the attributes: %s" % json.dumps(filter)) results = [] for image in images_result: data = { 'ami_id': image.id, 'architecture': image.architecture, 'description': image.description, 'is_public': image.is_public, 'name': image.name, 'owner_id': image.owner_id, 'platform': image.platform, 'root_device_name': image.root_device_name, 'root_device_type': image.root_device_type, 'state': image.state, 'tags': image.tags, 'virtualization_type': image.virtualization_type, } if image.kernel_id: data['kernel_id'] = image.kernel_id if image.ramdisk_id: data['ramdisk_id'] = image.ramdisk_id results.append(data) if sort == 'tag': if not sort_tag: module.fail_json(msg="'sort_tag' option must be given with 
'sort=tag'") results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order=='descending')) elif sort: results.sort(key=lambda e: e[sort], reverse=(sort_order=='descending')) try: if sort and sort_start and sort_end: results = results[int(sort_start):int(sort_end)] elif sort and sort_start: results = results[int(sort_start):] elif sort and sort_end: results = results[:int(sort_end)] except TypeError: module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end") module.exit_json(results=results) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * if __name__ == '__main__': main()
harnash/selene_drf
refs/heads/master
web/apps/core/settings/base.py
1
# Base settings file import os import sys from unipath import Path from django.core.exceptions import ImproperlyConfigured import dj_database_url PROJECT_ROOT = Path(__file__).ancestor(4) def get_env_var(varname, default=None): """Get the environment variable or raise an exception.""" try: return os.environ[varname] except KeyError: if default is not None: return default msg = "You must set the {0} environment variable.".format(varname) raise ImproperlyConfigured(msg) ADMINS = ( # ('Your Name', 'your_email@example.com'), ) MANAGERS = ADMINS DATABASES = { 'default': dj_database_url.parse(get_env_var('CORE_DATABASE_URL')) } # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # In a Windows environment this must be set to your system time zone. TIME_ZONE = 'UTC' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/var/www/example.com/media/" MEDIA_ROOT = PROJECT_ROOT.child('static').child('media') # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://example.com/media/", "http://media.example.com/" MEDIA_URL = '/media/' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. 
# Example: "/var/www/example.com/static/" STATIC_ROOT = PROJECT_ROOT.child('static').child('_build') # URL prefix for static files. # Example: "http://example.com/static/", "http://static.example.com/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = () # Make this unique, and don't share it with anybody. SECRET_KEY = get_env_var('CORE_SECRET_KEY') MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'apps.core.urls' # Python dotted path to the WSGI application used by Django's runserver. WSGI_APPLICATION = 'core.wsgi.application' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ PROJECT_ROOT.child('templates'), ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] INSTALLED_APPS = ( # Django apps 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.staticfiles', 'django.contrib.messages', # 'django.contrib.sites', # Third party apps 'rest_framework', 'rest_framework.authtoken', # 'allauth', # 'allauth.account', # 'allauth.socialaccount', # 'allauth.socialaccount.providers.facebook', # 'rest_auth', # 'rest_auth.registration', 'oauth2_provider', # local apps 'apps.wikia_auth', ) ## Django REST Framework Settings REST_FRAMEWORK = { 'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',), 'DEFAULT_AUTHENTICATION_CLASSES': ( 'oauth2_provider.ext.rest_framework.OAuth2Authentication', ), 
'DEFAULT_RENDERER_CLASSES': ( 'drf_ujson.renderers.UJSONRenderer', 'rest_framework.renderers.BrowsableAPIRenderer', ), 'DEFAULT_PARSER_CLASSES': ( 'drf_ujson.parsers.UJSONParser', 'rest_framework.parsers.FormParser', 'rest_framework.parsers.MultiPartParser', ), 'PAGE_SIZE': 10 } AUTHENTICATION_BACKENDS = ( # Needed to login by username in Django admin, regardless of `allauth` 'django.contrib.auth.backends.ModelBackend', # `allauth` specific authentication methods, such as login by e-mail # 'allauth.account.auth_backends.AuthenticationBackend', ) ACCOUNT_ACTIVATION_DAYS = 7 # AUTH_USER_MODEL = 'path.to.UserModel' SITE_ID = 1 # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' }, 'log_to_stdout': { 'level': 'DEBUG', 'class': 'logging.StreamHandler', # 'formatter': 'colored', 'stream': sys.stdout, }, }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } }
insionng/p2pool
refs/heads/master
p2pool/work.py
42
from __future__ import division import base64 import random import re import sys import time from twisted.internet import defer from twisted.python import log import bitcoin.getwork as bitcoin_getwork, bitcoin.data as bitcoin_data from bitcoin import helper, script, worker_interface from util import forest, jsonrpc, variable, deferral, math, pack import p2pool, p2pool.data as p2pool_data class WorkerBridge(worker_interface.WorkerBridge): COINBASE_NONCE_LENGTH = 8 def __init__(self, node, my_pubkey_hash, donation_percentage, merged_urls, worker_fee): worker_interface.WorkerBridge.__init__(self) self.recent_shares_ts_work = [] self.node = node self.my_pubkey_hash = my_pubkey_hash self.donation_percentage = donation_percentage self.worker_fee = worker_fee self.net = self.node.net.PARENT self.running = True self.pseudoshare_received = variable.Event() self.share_received = variable.Event() self.local_rate_monitor = math.RateMonitor(10*60) self.local_addr_rate_monitor = math.RateMonitor(10*60) self.removed_unstales_var = variable.Variable((0, 0, 0)) self.removed_doa_unstales_var = variable.Variable(0) self.my_share_hashes = set() self.my_doa_share_hashes = set() self.tracker_view = forest.TrackerView(self.node.tracker, forest.get_attributedelta_type(dict(forest.AttributeDelta.attrs, my_count=lambda share: 1 if share.hash in self.my_share_hashes else 0, my_doa_count=lambda share: 1 if share.hash in self.my_doa_share_hashes else 0, my_orphan_announce_count=lambda share: 1 if share.hash in self.my_share_hashes and share.share_data['stale_info'] == 'orphan' else 0, my_dead_announce_count=lambda share: 1 if share.hash in self.my_share_hashes and share.share_data['stale_info'] == 'doa' else 0, ))) @self.node.tracker.verified.removed.watch def _(share): if share.hash in self.my_share_hashes and self.node.tracker.is_child_of(share.hash, self.node.best_share_var.value): assert share.share_data['stale_info'] in [None, 'orphan', 'doa'] # we made these shares in this instance 
self.removed_unstales_var.set(( self.removed_unstales_var.value[0] + 1, self.removed_unstales_var.value[1] + (1 if share.share_data['stale_info'] == 'orphan' else 0), self.removed_unstales_var.value[2] + (1 if share.share_data['stale_info'] == 'doa' else 0), )) if share.hash in self.my_doa_share_hashes and self.node.tracker.is_child_of(share.hash, self.node.best_share_var.value): self.removed_doa_unstales_var.set(self.removed_doa_unstales_var.value + 1) # MERGED WORK self.merged_work = variable.Variable({}) @defer.inlineCallbacks def set_merged_work(merged_url, merged_userpass): merged_proxy = jsonrpc.HTTPProxy(merged_url, dict(Authorization='Basic ' + base64.b64encode(merged_userpass))) while self.running: auxblock = yield deferral.retry('Error while calling merged getauxblock on %s:' % (merged_url,), 30)(merged_proxy.rpc_getauxblock)() self.merged_work.set(math.merge_dicts(self.merged_work.value, {auxblock['chainid']: dict( hash=int(auxblock['hash'], 16), target='p2pool' if auxblock['target'] == 'p2pool' else pack.IntType(256).unpack(auxblock['target'].decode('hex')), merged_proxy=merged_proxy, )})) yield deferral.sleep(1) for merged_url, merged_userpass in merged_urls: set_merged_work(merged_url, merged_userpass) @self.merged_work.changed.watch def _(new_merged_work): print 'Got new merged mining work!' # COMBINE WORK self.current_work = variable.Variable(None) def compute_work(): t = self.node.bitcoind_work.value bb = self.node.best_block_header.value if bb is not None and bb['previous_block'] == t['previous_block'] and self.node.net.PARENT.POW_FUNC(bitcoin_data.block_header_type.pack(bb)) <= t['bits'].target: print 'Skipping from block %x to block %x!' 
% (bb['previous_block'], bitcoin_data.hash256(bitcoin_data.block_header_type.pack(bb))) t = dict( version=bb['version'], previous_block=bitcoin_data.hash256(bitcoin_data.block_header_type.pack(bb)), bits=bb['bits'], # not always true coinbaseflags='', height=t['height'] + 1, time=bb['timestamp'] + 600, # better way? transactions=[], transaction_fees=[], merkle_link=bitcoin_data.calculate_merkle_link([None], 0), subsidy=self.node.net.PARENT.SUBSIDY_FUNC(self.node.bitcoind_work.value['height']), last_update=self.node.bitcoind_work.value['last_update'], ) self.current_work.set(t) self.node.bitcoind_work.changed.watch(lambda _: compute_work()) self.node.best_block_header.changed.watch(lambda _: compute_work()) compute_work() self.new_work_event = variable.Event() @self.current_work.transitioned.watch def _(before, after): # trigger LP if version/previous_block/bits changed or transactions changed from nothing if any(before[x] != after[x] for x in ['version', 'previous_block', 'bits']) or (not before['transactions'] and after['transactions']): self.new_work_event.happened() self.merged_work.changed.watch(lambda _: self.new_work_event.happened()) self.node.best_share_var.changed.watch(lambda _: self.new_work_event.happened()) def stop(self): self.running = False def get_stale_counts(self): '''Returns (orphans, doas), total, (orphans_recorded_in_chain, doas_recorded_in_chain)''' my_shares = len(self.my_share_hashes) my_doa_shares = len(self.my_doa_share_hashes) delta = self.tracker_view.get_delta_to_last(self.node.best_share_var.value) my_shares_in_chain = delta.my_count + self.removed_unstales_var.value[0] my_doa_shares_in_chain = delta.my_doa_count + self.removed_doa_unstales_var.value orphans_recorded_in_chain = delta.my_orphan_announce_count + self.removed_unstales_var.value[1] doas_recorded_in_chain = delta.my_dead_announce_count + self.removed_unstales_var.value[2] my_shares_not_in_chain = my_shares - my_shares_in_chain my_doa_shares_not_in_chain = my_doa_shares - 
my_doa_shares_in_chain return (my_shares_not_in_chain - my_doa_shares_not_in_chain, my_doa_shares_not_in_chain), my_shares, (orphans_recorded_in_chain, doas_recorded_in_chain) def get_user_details(self, username): contents = re.split('([+/])', username) assert len(contents) % 2 == 1 user, contents2 = contents[0], contents[1:] desired_pseudoshare_target = None desired_share_target = None for symbol, parameter in zip(contents2[::2], contents2[1::2]): if symbol == '+': try: desired_pseudoshare_target = bitcoin_data.difficulty_to_target(float(parameter)) except: if p2pool.DEBUG: log.err() elif symbol == '/': try: desired_share_target = bitcoin_data.difficulty_to_target(float(parameter)) except: if p2pool.DEBUG: log.err() if random.uniform(0, 100) < self.worker_fee: pubkey_hash = self.my_pubkey_hash else: try: pubkey_hash = bitcoin_data.address_to_pubkey_hash(user, self.node.net.PARENT) except: # XXX blah pubkey_hash = self.my_pubkey_hash return user, pubkey_hash, desired_share_target, desired_pseudoshare_target def preprocess_request(self, user): if (self.node.p2p_node is None or len(self.node.p2p_node.peers) == 0) and self.node.net.PERSIST: raise jsonrpc.Error_for_code(-12345)(u'p2pool is not connected to any peers') if time.time() > self.current_work.value['last_update'] + 60: raise jsonrpc.Error_for_code(-12345)(u'lost contact with bitcoind') user, pubkey_hash, desired_share_target, desired_pseudoshare_target = self.get_user_details(user) return pubkey_hash, desired_share_target, desired_pseudoshare_target def _estimate_local_hash_rate(self): if len(self.recent_shares_ts_work) == 50: hash_rate = sum(work for ts, work in self.recent_shares_ts_work[1:])//(self.recent_shares_ts_work[-1][0] - self.recent_shares_ts_work[0][0]) if hash_rate: return hash_rate return None def get_local_rates(self): miner_hash_rates = {} miner_dead_hash_rates = {} datums, dt = self.local_rate_monitor.get_datums_in_last() for datum in datums: miner_hash_rates[datum['user']] = 
miner_hash_rates.get(datum['user'], 0) + datum['work']/dt if datum['dead']: miner_dead_hash_rates[datum['user']] = miner_dead_hash_rates.get(datum['user'], 0) + datum['work']/dt return miner_hash_rates, miner_dead_hash_rates def get_local_addr_rates(self): addr_hash_rates = {} datums, dt = self.local_addr_rate_monitor.get_datums_in_last() for datum in datums: addr_hash_rates[datum['pubkey_hash']] = addr_hash_rates.get(datum['pubkey_hash'], 0) + datum['work']/dt return addr_hash_rates def get_work(self, pubkey_hash, desired_share_target, desired_pseudoshare_target): if self.node.best_share_var.value is None and self.node.net.PERSIST: raise jsonrpc.Error_for_code(-12345)(u'p2pool is downloading shares') if self.merged_work.value: tree, size = bitcoin_data.make_auxpow_tree(self.merged_work.value) mm_hashes = [self.merged_work.value.get(tree.get(i), dict(hash=0))['hash'] for i in xrange(size)] mm_data = '\xfa\xbemm' + bitcoin_data.aux_pow_coinbase_type.pack(dict( merkle_root=bitcoin_data.merkle_hash(mm_hashes), size=size, nonce=0, )) mm_later = [(aux_work, mm_hashes.index(aux_work['hash']), mm_hashes) for chain_id, aux_work in self.merged_work.value.iteritems()] else: mm_data = '' mm_later = [] tx_hashes = [bitcoin_data.hash256(bitcoin_data.tx_type.pack(tx)) for tx in self.current_work.value['transactions']] tx_map = dict(zip(tx_hashes, self.current_work.value['transactions'])) previous_share = self.node.tracker.items[self.node.best_share_var.value] if self.node.best_share_var.value is not None else None if previous_share is None: share_type = p2pool_data.Share else: previous_share_type = type(previous_share) if previous_share_type.SUCCESSOR is None or self.node.tracker.get_height(previous_share.hash) < self.node.net.CHAIN_LENGTH: share_type = previous_share_type else: successor_type = previous_share_type.SUCCESSOR counts = p2pool_data.get_desired_version_counts(self.node.tracker, self.node.tracker.get_nth_parent_hash(previous_share.hash, 
self.node.net.CHAIN_LENGTH*9//10), self.node.net.CHAIN_LENGTH//10) upgraded = counts.get(successor_type.VERSION, 0)/sum(counts.itervalues()) if upgraded > .65: print 'Switchover imminent. Upgraded: %.3f%% Threshold: %.3f%%' % (upgraded*100, 95) print # Share -> NewShare only valid if 95% of hashes in [net.CHAIN_LENGTH*9//10, net.CHAIN_LENGTH] for new version if counts.get(successor_type.VERSION, 0) > sum(counts.itervalues())*95//100: share_type = successor_type else: share_type = previous_share_type if desired_share_target is None: desired_share_target = 2**256-1 local_hash_rate = self._estimate_local_hash_rate() if local_hash_rate is not None: desired_share_target = min(desired_share_target, bitcoin_data.average_attempts_to_target(local_hash_rate * self.node.net.SHARE_PERIOD / 0.0167)) # limit to 1.67% of pool shares by modulating share difficulty local_addr_rates = self.get_local_addr_rates() lookbehind = 3600//self.node.net.SHARE_PERIOD block_subsidy = self.node.bitcoind_work.value['subsidy'] if previous_share is not None and self.node.tracker.get_height(previous_share.hash) > lookbehind: expected_payout_per_block = local_addr_rates.get(pubkey_hash, 0)/p2pool_data.get_pool_attempts_per_second(self.node.tracker, self.node.best_share_var.value, lookbehind) \ * block_subsidy*(1-self.donation_percentage/100) # XXX doesn't use global stale rate to compute pool hash if expected_payout_per_block < self.node.net.PARENT.DUST_THRESHOLD: desired_share_target = min(desired_share_target, bitcoin_data.average_attempts_to_target((bitcoin_data.target_to_average_attempts(self.node.bitcoind_work.value['bits'].target)*self.node.net.SPREAD)*self.node.net.PARENT.DUST_THRESHOLD/block_subsidy) ) if True: share_info, gentx, other_transaction_hashes, get_share = share_type.generate_transaction( tracker=self.node.tracker, share_data=dict( previous_share_hash=self.node.best_share_var.value, coinbase=(script.create_push_script([ self.current_work.value['height'], ] + ([mm_data] if mm_data 
else []) + [ ]) + self.current_work.value['coinbaseflags'])[:100], nonce=random.randrange(2**32), pubkey_hash=pubkey_hash, subsidy=self.current_work.value['subsidy'], donation=math.perfect_round(65535*self.donation_percentage/100), stale_info=(lambda (orphans, doas), total, (orphans_recorded_in_chain, doas_recorded_in_chain): 'orphan' if orphans > orphans_recorded_in_chain else 'doa' if doas > doas_recorded_in_chain else None )(*self.get_stale_counts()), desired_version=(share_type.SUCCESSOR if share_type.SUCCESSOR is not None else share_type).VOTING_VERSION, ), block_target=self.current_work.value['bits'].target, desired_timestamp=int(time.time() + 0.5), desired_target=desired_share_target, ref_merkle_link=dict(branch=[], index=0), desired_other_transaction_hashes_and_fees=zip(tx_hashes, self.current_work.value['transaction_fees']), net=self.node.net, known_txs=tx_map, base_subsidy=self.node.net.PARENT.SUBSIDY_FUNC(self.current_work.value['height']), ) packed_gentx = bitcoin_data.tx_type.pack(gentx) other_transactions = [tx_map[tx_hash] for tx_hash in other_transaction_hashes] mm_later = [(dict(aux_work, target=aux_work['target'] if aux_work['target'] != 'p2pool' else share_info['bits'].target), index, hashes) for aux_work, index, hashes in mm_later] if desired_pseudoshare_target is None: target = 2**256-1 local_hash_rate = self._estimate_local_hash_rate() if local_hash_rate is not None: target = min(target, bitcoin_data.average_attempts_to_target(local_hash_rate * 1)) # limit to 1 share response every second by modulating pseudoshare difficulty else: target = desired_pseudoshare_target target = max(target, share_info['bits'].target) for aux_work, index, hashes in mm_later: target = max(target, aux_work['target']) target = math.clip(target, self.node.net.PARENT.SANE_TARGET_RANGE) getwork_time = time.time() lp_count = self.new_work_event.times merkle_link = bitcoin_data.calculate_merkle_link([None] + other_transaction_hashes, 0) print 'New work for worker! 
Difficulty: %.06f Share difficulty: %.06f Total block value: %.6f %s including %i transactions' % ( bitcoin_data.target_to_difficulty(target), bitcoin_data.target_to_difficulty(share_info['bits'].target), self.current_work.value['subsidy']*1e-8, self.node.net.PARENT.SYMBOL, len(self.current_work.value['transactions']), ) ba = dict( version=min(self.current_work.value['version'], 2), previous_block=self.current_work.value['previous_block'], merkle_link=merkle_link, coinb1=packed_gentx[:-self.COINBASE_NONCE_LENGTH-4], coinb2=packed_gentx[-4:], timestamp=self.current_work.value['time'], bits=self.current_work.value['bits'], share_target=target, ) received_header_hashes = set() def got_response(header, user, coinbase_nonce): assert len(coinbase_nonce) == self.COINBASE_NONCE_LENGTH new_packed_gentx = packed_gentx[:-self.COINBASE_NONCE_LENGTH-4] + coinbase_nonce + packed_gentx[-4:] if coinbase_nonce != '\0'*self.COINBASE_NONCE_LENGTH else packed_gentx new_gentx = bitcoin_data.tx_type.unpack(new_packed_gentx) if coinbase_nonce != '\0'*self.COINBASE_NONCE_LENGTH else gentx header_hash = bitcoin_data.hash256(bitcoin_data.block_header_type.pack(header)) pow_hash = self.node.net.PARENT.POW_FUNC(bitcoin_data.block_header_type.pack(header)) try: if pow_hash <= header['bits'].target or p2pool.DEBUG: helper.submit_block(dict(header=header, txs=[new_gentx] + other_transactions), False, self.node.factory, self.node.bitcoind, self.node.bitcoind_work, self.node.net) if pow_hash <= header['bits'].target: print print 'GOT BLOCK FROM MINER! Passing to bitcoind! 
%s%064x' % (self.node.net.PARENT.BLOCK_EXPLORER_URL_PREFIX, header_hash) print except: log.err(None, 'Error while processing potential block:') user, _, _, _ = self.get_user_details(user) assert header['previous_block'] == ba['previous_block'] assert header['merkle_root'] == bitcoin_data.check_merkle_link(bitcoin_data.hash256(new_packed_gentx), merkle_link) assert header['bits'] == ba['bits'] on_time = self.new_work_event.times == lp_count for aux_work, index, hashes in mm_later: try: if pow_hash <= aux_work['target'] or p2pool.DEBUG: df = deferral.retry('Error submitting merged block: (will retry)', 10, 10)(aux_work['merged_proxy'].rpc_getauxblock)( pack.IntType(256, 'big').pack(aux_work['hash']).encode('hex'), bitcoin_data.aux_pow_type.pack(dict( merkle_tx=dict( tx=new_gentx, block_hash=header_hash, merkle_link=merkle_link, ), merkle_link=bitcoin_data.calculate_merkle_link(hashes, index), parent_block_header=header, )).encode('hex'), ) @df.addCallback def _(result, aux_work=aux_work): if result != (pow_hash <= aux_work['target']): print >>sys.stderr, 'Merged block submittal result: %s Expected: %s' % (result, pow_hash <= aux_work['target']) else: print 'Merged block submittal result: %s' % (result,) @df.addErrback def _(err): log.err(err, 'Error submitting merged block:') except: log.err(None, 'Error while processing merged mining POW:') if pow_hash <= share_info['bits'].target and header_hash not in received_header_hashes: last_txout_nonce = pack.IntType(8*self.COINBASE_NONCE_LENGTH).unpack(coinbase_nonce) share = get_share(header, last_txout_nonce) print 'GOT SHARE! 
%s %s prev %s age %.2fs%s' % ( user, p2pool_data.format_hash(share.hash), p2pool_data.format_hash(share.previous_hash), time.time() - getwork_time, ' DEAD ON ARRIVAL' if not on_time else '', ) self.my_share_hashes.add(share.hash) if not on_time: self.my_doa_share_hashes.add(share.hash) self.node.tracker.add(share) self.node.set_best_share() try: if (pow_hash <= header['bits'].target or p2pool.DEBUG) and self.node.p2p_node is not None: self.node.p2p_node.broadcast_share(share.hash) except: log.err(None, 'Error forwarding block solution:') self.share_received.happened(bitcoin_data.target_to_average_attempts(share.target), not on_time, share.hash) if pow_hash > target: print 'Worker %s submitted share with hash > target:' % (user,) print ' Hash: %56x' % (pow_hash,) print ' Target: %56x' % (target,) elif header_hash in received_header_hashes: print >>sys.stderr, 'Worker %s submitted share more than once!' % (user,) else: received_header_hashes.add(header_hash) self.pseudoshare_received.happened(bitcoin_data.target_to_average_attempts(target), not on_time, user) self.recent_shares_ts_work.append((time.time(), bitcoin_data.target_to_average_attempts(target))) while len(self.recent_shares_ts_work) > 50: self.recent_shares_ts_work.pop(0) self.local_rate_monitor.add_datum(dict(work=bitcoin_data.target_to_average_attempts(target), dead=not on_time, user=user, share_target=share_info['bits'].target)) self.local_addr_rate_monitor.add_datum(dict(work=bitcoin_data.target_to_average_attempts(target), pubkey_hash=pubkey_hash)) return on_time return ba, got_response
eyalev/timee
refs/heads/master
timee/timee_general.py
1
from timee.timee_dt import TimeeDT class Timee(object): def __init__(self): pass @staticmethod def from_maya(maya_dt): """ :type maya_dt: maya.MayaDT """ _dt = maya_dt.datetime() timee_dt = TimeeDT(datetime_param=_dt) return timee_dt
luogangyi/bcec-nova
refs/heads/stable/icehouse
nova/scheduler/filters/pci_passthrough_filter.py
16
# Copyright (c) 2013 ISP RAS. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.scheduler import filters class PciPassthroughFilter(filters.BaseHostFilter): """Pci Passthrough Filter based on PCI request Filter that schedules instances on a host if the host has devices to meet the device requests in the 'extra_specs' for the flavor. PCI resource tracker provides updated summary information about the PCI devices for each host, like: [{"count": 5, "vendor_id": "8086", "product_id": "1520", "extra_info":'{}'}], and VM requests PCI devices via PCI requests, like: [{"count": 1, "vendor_id": "8086", "product_id": "1520",}]. The filter checks if the host passes or not based on this information. """ def host_passes(self, host_state, filter_properties): """Return true if the host has the required PCI devices.""" if not filter_properties.get('pci_requests'): return True return host_state.pci_stats.support_requests( filter_properties.get('pci_requests'))
NamelessRom/android_kernel_samsung_latona
refs/heads/android-4.4
tools/perf/scripts/python/netdev-times.py
11271
# Display a process of packets and processed time. # It helps us to investigate networking or network device. # # options # tx: show only tx chart # rx: show only rx chart # dev=: show only thing related to specified device # debug: work with debug mode. It shows buffer status. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * all_event_list = []; # insert all tracepoint event related with this script irq_dic = {}; # key is cpu and value is a list which stacks irqs # which raise NET_RX softirq net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry # and a list which stacks receive receive_hunk_list = []; # a list which include a sequence of receive events rx_skb_list = []; # received packet list for matching # skb_copy_datagram_iovec buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and # tx_xmit_list of_count_rx_skb_list = 0; # overflow count tx_queue_list = []; # list of packets which pass through dev_queue_xmit of_count_tx_queue_list = 0; # overflow count tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit of_count_tx_xmit_list = 0; # overflow count tx_free_list = []; # list of packets which is freed # options show_tx = 0; show_rx = 0; dev = 0; # store a name of device specified by option "dev=" debug = 0; # indices of event_info tuple EINFO_IDX_NAME= 0 EINFO_IDX_CONTEXT=1 EINFO_IDX_CPU= 2 EINFO_IDX_TIME= 3 EINFO_IDX_PID= 4 EINFO_IDX_COMM= 5 # Calculate a time interval(msec) from src(nsec) to dst(nsec) def diff_msec(src, dst): return (dst - src) / 1000000.0 # Display a process of transmitting a packet def print_transmit(hunk): if dev != 0 and hunk['dev'].find(dev) < 0: return print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \ (hunk['dev'], hunk['len'], nsecs_secs(hunk['queue_t']), nsecs_nsecs(hunk['queue_t'])/1000, diff_msec(hunk['queue_t'], hunk['xmit_t']), 
diff_msec(hunk['xmit_t'], hunk['free_t'])) # Format for displaying rx packet processing PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)" PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)" PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)" PF_JOINT= " |" PF_WJOINT= " | |" PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)" PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)" PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)" PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)" PF_CONS_SKB= " | consume_skb(+%.3fmsec)" # Display a process of received packets and interrputs associated with # a NET_RX softirq def print_receive(hunk): show_hunk = 0 irq_list = hunk['irq_list'] cpu = irq_list[0]['cpu'] base_t = irq_list[0]['irq_ent_t'] # check if this hunk should be showed if dev != 0: for i in range(len(irq_list)): if irq_list[i]['name'].find(dev) >= 0: show_hunk = 1 break else: show_hunk = 1 if show_hunk == 0: return print "%d.%06dsec cpu=%d" % \ (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu) for i in range(len(irq_list)): print PF_IRQ_ENTRY % \ (diff_msec(base_t, irq_list[i]['irq_ent_t']), irq_list[i]['irq'], irq_list[i]['name']) print PF_JOINT irq_event_list = irq_list[i]['event_list'] for j in range(len(irq_event_list)): irq_event = irq_event_list[j] if irq_event['event'] == 'netif_rx': print PF_NET_RX % \ (diff_msec(base_t, irq_event['time']), irq_event['skbaddr']) print PF_JOINT print PF_SOFT_ENTRY % \ diff_msec(base_t, hunk['sirq_ent_t']) print PF_JOINT event_list = hunk['event_list'] for i in range(len(event_list)): event = event_list[i] if event['event_name'] == 'napi_poll': print PF_NAPI_POLL % \ (diff_msec(base_t, event['event_t']), event['dev']) if i == len(event_list) - 1: print "" else: print PF_JOINT else: print PF_NET_RECV % \ (diff_msec(base_t, event['event_t']), event['skbaddr'], event['len']) if 'comm' in event.keys(): print PF_WJOINT print PF_CPY_DGRAM % \ (diff_msec(base_t, event['comm_t']), event['pid'], event['comm']) elif 'handle' in event.keys(): 
print PF_WJOINT if event['handle'] == "kfree_skb": print PF_KFREE_SKB % \ (diff_msec(base_t, event['comm_t']), event['location']) elif event['handle'] == "consume_skb": print PF_CONS_SKB % \ diff_msec(base_t, event['comm_t']) print PF_JOINT def trace_begin(): global show_tx global show_rx global dev global debug for i in range(len(sys.argv)): if i == 0: continue arg = sys.argv[i] if arg == 'tx': show_tx = 1 elif arg =='rx': show_rx = 1 elif arg.find('dev=',0, 4) >= 0: dev = arg[4:] elif arg == 'debug': debug = 1 if show_tx == 0 and show_rx == 0: show_tx = 1 show_rx = 1 def trace_end(): # order all events in time all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME], b[EINFO_IDX_TIME])) # process all events for i in range(len(all_event_list)): event_info = all_event_list[i] name = event_info[EINFO_IDX_NAME] if name == 'irq__softirq_exit': handle_irq_softirq_exit(event_info) elif name == 'irq__softirq_entry': handle_irq_softirq_entry(event_info) elif name == 'irq__softirq_raise': handle_irq_softirq_raise(event_info) elif name == 'irq__irq_handler_entry': handle_irq_handler_entry(event_info) elif name == 'irq__irq_handler_exit': handle_irq_handler_exit(event_info) elif name == 'napi__napi_poll': handle_napi_poll(event_info) elif name == 'net__netif_receive_skb': handle_netif_receive_skb(event_info) elif name == 'net__netif_rx': handle_netif_rx(event_info) elif name == 'skb__skb_copy_datagram_iovec': handle_skb_copy_datagram_iovec(event_info) elif name == 'net__net_dev_queue': handle_net_dev_queue(event_info) elif name == 'net__net_dev_xmit': handle_net_dev_xmit(event_info) elif name == 'skb__kfree_skb': handle_kfree_skb(event_info) elif name == 'skb__consume_skb': handle_consume_skb(event_info) # display receive hunks if show_rx: for i in range(len(receive_hunk_list)): print_receive(receive_hunk_list[i]) # display transmit hunks if show_tx: print " dev len Qdisc " \ " netdevice free" for i in range(len(tx_free_list)): print_transmit(tx_free_list[i]) if debug: print 
"debug buffer status" print "----------------------------" print "xmit Qdisc:remain:%d overflow:%d" % \ (len(tx_queue_list), of_count_tx_queue_list) print "xmit netdevice:remain:%d overflow:%d" % \ (len(tx_xmit_list), of_count_tx_xmit_list) print "receive:remain:%d overflow:%d" % \ (len(rx_skb_list), of_count_rx_skb_list) # called from perf, when it finds a correspoinding event def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm, irq, irq_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, irq_name) all_event_list.append(event_info) def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret) all_event_list.append(event_info) def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, napi, dev_name) all_event_list.append(event_info) def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, dev_name): event_info = 
(name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, rc, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, rc ,dev_name) all_event_list.append(event_info) def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, protocol, location): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, protocol, location) all_event_list.append(event_info) def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr) all_event_list.append(event_info) def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen) all_event_list.append(event_info) def handle_irq_handler_entry(event_info): (name, context, cpu, time, pid, comm, irq, irq_name) = event_info if cpu not in irq_dic.keys(): irq_dic[cpu] = [] irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time} irq_dic[cpu].append(irq_record) def handle_irq_handler_exit(event_info): (name, context, cpu, time, pid, comm, irq, ret) = event_info if cpu not in irq_dic.keys(): return irq_record = irq_dic[cpu].pop() if irq != irq_record['irq']: return irq_record.update({'irq_ext_t':time}) # if an irq doesn't include NET_RX softirq, drop. 
if 'event_list' in irq_record.keys(): irq_dic[cpu].append(irq_record) def handle_irq_softirq_raise(event_info): (name, context, cpu, time, pid, comm, vec) = event_info if cpu not in irq_dic.keys() \ or len(irq_dic[cpu]) == 0: return irq_record = irq_dic[cpu].pop() if 'event_list' in irq_record.keys(): irq_event_list = irq_record['event_list'] else: irq_event_list = [] irq_event_list.append({'time':time, 'event':'sirq_raise'}) irq_record.update({'event_list':irq_event_list}) irq_dic[cpu].append(irq_record) def handle_irq_softirq_entry(event_info): (name, context, cpu, time, pid, comm, vec) = event_info net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]} def handle_irq_softirq_exit(event_info): (name, context, cpu, time, pid, comm, vec) = event_info irq_list = [] event_list = 0 if cpu in irq_dic.keys(): irq_list = irq_dic[cpu] del irq_dic[cpu] if cpu in net_rx_dic.keys(): sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t'] event_list = net_rx_dic[cpu]['event_list'] del net_rx_dic[cpu] if irq_list == [] or event_list == 0: return rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time, 'irq_list':irq_list, 'event_list':event_list} # merge information realted to a NET_RX softirq receive_hunk_list.append(rec_data) def handle_napi_poll(event_info): (name, context, cpu, time, pid, comm, napi, dev_name) = event_info if cpu in net_rx_dic.keys(): event_list = net_rx_dic[cpu]['event_list'] rec_data = {'event_name':'napi_poll', 'dev':dev_name, 'event_t':time} event_list.append(rec_data) def handle_netif_rx(event_info): (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info if cpu not in irq_dic.keys() \ or len(irq_dic[cpu]) == 0: return irq_record = irq_dic[cpu].pop() if 'event_list' in irq_record.keys(): irq_event_list = irq_record['event_list'] else: irq_event_list = [] irq_event_list.append({'time':time, 'event':'netif_rx', 'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name}) irq_record.update({'event_list':irq_event_list}) 
irq_dic[cpu].append(irq_record) def handle_netif_receive_skb(event_info): global of_count_rx_skb_list (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info if cpu in net_rx_dic.keys(): rec_data = {'event_name':'netif_receive_skb', 'event_t':time, 'skbaddr':skbaddr, 'len':skblen} event_list = net_rx_dic[cpu]['event_list'] event_list.append(rec_data) rx_skb_list.insert(0, rec_data) if len(rx_skb_list) > buffer_budget: rx_skb_list.pop() of_count_rx_skb_list += 1 def handle_net_dev_queue(event_info): global of_count_tx_queue_list (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time} tx_queue_list.insert(0, skb) if len(tx_queue_list) > buffer_budget: tx_queue_list.pop() of_count_tx_queue_list += 1 def handle_net_dev_xmit(event_info): global of_count_tx_xmit_list (name, context, cpu, time, pid, comm, skbaddr, skblen, rc, dev_name) = event_info if rc == 0: # NETDEV_TX_OK for i in range(len(tx_queue_list)): skb = tx_queue_list[i] if skb['skbaddr'] == skbaddr: skb['xmit_t'] = time tx_xmit_list.insert(0, skb) del tx_queue_list[i] if len(tx_xmit_list) > buffer_budget: tx_xmit_list.pop() of_count_tx_xmit_list += 1 return def handle_kfree_skb(event_info): (name, context, cpu, time, pid, comm, skbaddr, protocol, location) = event_info for i in range(len(tx_queue_list)): skb = tx_queue_list[i] if skb['skbaddr'] == skbaddr: del tx_queue_list[i] return for i in range(len(tx_xmit_list)): skb = tx_xmit_list[i] if skb['skbaddr'] == skbaddr: skb['free_t'] = time tx_free_list.append(skb) del tx_xmit_list[i] return for i in range(len(rx_skb_list)): rec_data = rx_skb_list[i] if rec_data['skbaddr'] == skbaddr: rec_data.update({'handle':"kfree_skb", 'comm':comm, 'pid':pid, 'comm_t':time}) del rx_skb_list[i] return def handle_consume_skb(event_info): (name, context, cpu, time, pid, comm, skbaddr) = event_info for i in range(len(tx_xmit_list)): skb = tx_xmit_list[i] if 
skb['skbaddr'] == skbaddr: skb['free_t'] = time tx_free_list.append(skb) del tx_xmit_list[i] return def handle_skb_copy_datagram_iovec(event_info): (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info for i in range(len(rx_skb_list)): rec_data = rx_skb_list[i] if skbaddr == rec_data['skbaddr']: rec_data.update({'handle':"skb_copy_datagram_iovec", 'comm':comm, 'pid':pid, 'comm_t':time}) del rx_skb_list[i] return
jomo/youtube-dl
refs/heads/master
youtube_dl/extractor/pornhub.py
3
from __future__ import unicode_literals import os import re from .common import InfoExtractor from ..compat import ( compat_urllib_parse, compat_urllib_parse_urlparse, compat_urllib_request, ) from ..utils import ( ExtractorError, str_to_int, ) from ..aes import ( aes_decrypt_text ) class PornHubIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?pornhub\.com/(?:view_video\.php\?viewkey=|embed/)(?P<id>[0-9a-f]+)' _TEST = { 'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015', 'md5': '882f488fa1f0026f023f33576004a2ed', 'info_dict': { 'id': '648719015', 'ext': 'mp4', "uploader": "Babes", "title": "Seductive Indian beauty strips down and fingers her pink pussy", "age_limit": 18 } } @classmethod def _extract_url(cls, webpage): mobj = re.search( r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?pornhub\.com/embed/\d+)\1', webpage) if mobj: return mobj.group('url') def _extract_count(self, pattern, webpage, name): return str_to_int(self._search_regex( pattern, webpage, '%s count' % name, fatal=False)) def _real_extract(self, url): video_id = self._match_id(url) req = compat_urllib_request.Request( 'http://www.pornhub.com/view_video.php?viewkey=%s' % video_id) req.add_header('Cookie', 'age_verified=1') webpage = self._download_webpage(req, video_id) error_msg = self._html_search_regex( r'(?s)<div class="userMessageSection[^"]*".*?>(.*?)</div>', webpage, 'error message', default=None) if error_msg: error_msg = re.sub(r'\s+', ' ', error_msg) raise ExtractorError( 'PornHub said: %s' % error_msg, expected=True, video_id=video_id) video_title = self._html_search_regex(r'<h1 [^>]+>([^<]+)', webpage, 'title') video_uploader = self._html_search_regex( r'(?s)From:&nbsp;.+?<(?:a href="/users/|a href="/channels/|span class="username)[^>]+>(.+?)<', webpage, 'uploader', fatal=False) thumbnail = self._html_search_regex(r'"image_url":"([^"]+)', webpage, 'thumbnail', fatal=False) if thumbnail: thumbnail = compat_urllib_parse.unquote(thumbnail) view_count = 
self._extract_count( r'<span class="count">([\d,\.]+)</span> views', webpage, 'view') like_count = self._extract_count( r'<span class="votesUp">([\d,\.]+)</span>', webpage, 'like') dislike_count = self._extract_count( r'<span class="votesDown">([\d,\.]+)</span>', webpage, 'dislike') comment_count = self._extract_count( r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment') video_urls = list(map(compat_urllib_parse.unquote, re.findall(r'"quality_[0-9]{3}p":"([^"]+)', webpage))) if webpage.find('"encrypted":true') != -1: password = compat_urllib_parse.unquote_plus( self._search_regex(r'"video_title":"([^"]+)', webpage, 'password')) video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls)) formats = [] for video_url in video_urls: path = compat_urllib_parse_urlparse(video_url).path extension = os.path.splitext(path)[1][1:] format = path.split('/')[5].split('_')[:2] format = "-".join(format) m = re.match(r'^(?P<height>[0-9]+)P-(?P<tbr>[0-9]+)K$', format) if m is None: height = None tbr = None else: height = int(m.group('height')) tbr = int(m.group('tbr')) formats.append({ 'url': video_url, 'ext': extension, 'format': format, 'format_id': format, 'tbr': tbr, 'height': height, }) self._sort_formats(formats) return { 'id': video_id, 'uploader': video_uploader, 'title': video_title, 'thumbnail': thumbnail, 'view_count': view_count, 'like_count': like_count, 'dislike_count': dislike_count, 'comment_count': comment_count, 'formats': formats, 'age_limit': 18, } class PornHubPlaylistIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?pornhub\.com/playlist/(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.pornhub.com/playlist/6201671', 'info_dict': { 'id': '6201671', 'title': 'P0p4', }, 'playlist_mincount': 35, }] def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) entries = [ self.url_result('http://www.pornhub.com/%s' % video_url, 'PornHub') for video_url in 
set(re.findall('href="/?(view_video\.php\?viewkey=\d+[^"]*)"', webpage)) ] playlist = self._parse_json( self._search_regex( r'playlistObject\s*=\s*({.+?});', webpage, 'playlist'), playlist_id) return self.playlist_result( entries, playlist_id, playlist.get('title'), playlist.get('description'))
Endika/c2c-rd-addons
refs/heads/8.0
sale_order_2_purchase/invoice.py
4
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # Copyright (C) 2010-2012 ChriCar Beteiligungs- und Beratungs- GmbH (<http://www.camptocamp.at>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv import openerp.netsvc import logging class account_invoice(osv.osv): _inherit = "account.invoice" def button_validate(self, cr , uid, ids, context=None): """FIXME workaround because of limited multi company support """ _logger = logging.getLogger(__name__) if not context: context = {} for invoice in self.browse(cr, uid, ids, context): _logger.debug('FGF validate partner %s ' %(invoice.partner_id.id) ) if invoice.partner_id.company_id and invoice.partner_id.company_id.id != invoice.company_id.id: _logger.debug('FGF update partner %s ' %(invoice.partner_id.id) ) self.pool.get('res.partner').write(cr, 1, [invoice.partner_id.id], {'company_id':''}) res= self.button_validate(cr , uid, ids, context) return res account_invoice()
mdjurfeldt/nest-simulator
refs/heads/master
pynest/nest/tests/test_onetooneconnect.py
9
# -*- coding: utf-8 -*- # # test_onetooneconnect.py # # This file is part of NEST. # # Copyright (C) 2004 The NEST Initiative # # NEST is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # NEST is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with NEST. If not, see <http://www.gnu.org/licenses/>. """ UnitTests for the PyNEST connect API. """ import unittest import nest @nest.check_stack class OneToOneConnectTestCase(unittest.TestCase): """Tests of Connect with OneToOne pattern""" def test_ConnectPrePost(self): """Connect pre to post""" # Connect([pre], [post]) nest.ResetKernel() pre = nest.Create("iaf_neuron", 2) post = nest.Create("iaf_neuron", 2) nest.Connect(pre, post, "one_to_one") connections = nest.GetConnections(pre) targets = nest.GetStatus(connections, "target") self.assertEqual(targets, post) def test_ConnectPrePostParams(self): """Connect pre to post with a params dict""" # Connect([pre], [post], params) nest.ResetKernel() pre = nest.Create("iaf_neuron", 2) post = nest.Create("iaf_neuron", 2) nest.Connect(pre, post, "one_to_one", syn_spec={"weight": 2.0}) connections = nest.GetConnections(pre) weights = nest.GetStatus(connections, "weight") self.assertEqual(weights, (2.0, 2.0)) # Connect([pre], [post], [params, params]) nest.ResetKernel() pre = nest.Create("iaf_neuron", 2) post = nest.Create("iaf_neuron", 2) nest.Connect(pre, post, conn_spec={"rule": "one_to_one"}, syn_spec={"weight": [2.0, 3.0]}) connections = nest.GetConnections(pre) weights = nest.GetStatus(connections, "weight") self.assertEqual(weights, (2.0, 3.0)) def 
test_ConnectPrePostWD(self): """Connect pre to post with a weight and delay""" # Connect([pre], [post], w, d) nest.ResetKernel() pre = nest.Create("iaf_neuron", 2) post = nest.Create("iaf_neuron", 2) nest.Connect(pre, post, conn_spec={"rule": "one_to_one"}, syn_spec={"weight": 2.0, "delay": 2.0}) connections = nest.GetConnections(pre) weights = nest.GetStatus(connections, "weight") delays = nest.GetStatus(connections, "delay") self.assertEqual(weights, (2.0, 2.0)) self.assertEqual(delays, (2.0, 2.0)) # Connect([pre], [post], [w, w], [d, d]) nest.ResetKernel() pre = nest.Create("iaf_neuron", 2) post = nest.Create("iaf_neuron", 2) nest.Connect(pre, post, conn_spec={"rule": "one_to_one"}, syn_spec={"weight": [2.0, 3.0], "delay": [2.0, 3.0]}) connections = nest.GetConnections(pre) weights = nest.GetStatus(connections, "weight") delays = nest.GetStatus(connections, "delay") self.assertEqual(weights, (2.0, 3.0)) self.assertEqual(delays, (2.0, 3.0)) def test_IllegalConnection(self): """Wrong Connections""" nest.ResetKernel() n = nest.Create('iaf_neuron') vm = nest.Create('voltmeter') self.assertRaisesRegex( nest.NESTError, "IllegalConnection", nest.Connect, n, vm) def test_UnexpectedEvent(self): """Unexpected Event""" nest.ResetKernel() n = nest.Create('iaf_neuron') sd = nest.Create('spike_detector') self.assertRaisesRegex( nest.NESTError, "UnexpectedEvent", nest.Connect, sd, n) def suite(): suite = unittest.makeSuite(OneToOneConnectTestCase, 'test') return suite def run(): runner = unittest.TextTestRunner(verbosity=2) runner.run(suite()) if __name__ == "__main__": run()
isleei/xhtml2pdf
refs/heads/master
tests/test_parser.py
130
import unittest from xhtml2pdf.parser import pisaParser from xhtml2pdf.context import pisaContext _data = """ <!doctype html> <html> <title>TITLE</title> <body> BODY </body> </html> """ class TestCase(unittest.TestCase): def testParser(self): c = pisaContext(".") r = pisaParser(_data, c) self.assertEqual(c, r) def buildTestSuite(): return unittest.defaultTestLoader.loadTestsFromName(__name__) def main(): buildTestSuite() unittest.main() if __name__ == "__main__": main()
azurestandard/django
refs/heads/master
tests/regressiontests/test_client_regress/session.py
155
from django.contrib.sessions.backends.base import SessionBase class SessionStore(SessionBase): """ A simple cookie-based session storage implementation. The session key is actually the session data, pickled and encoded. This means that saving the session will change the session key. """ def __init__(self, session_key=None): super(SessionStore, self).__init__(session_key) def exists(self, session_key): return False def create(self): self._session_key = self.encode({}) def save(self, must_create=False): self._session_key = self.encode(self._session) def delete(self, session_key=None): self._session_key = self.encode({}) def load(self): try: return self.decode(self.session_key) except: self.modified = True return {}
CZ-NIC/knot
refs/heads/master
tests-extra/tests/dnssec/nsec3_revert/test.py
1
#!/usr/bin/env python3 '''Test for unsiccessful creation of NSEC3 tree''' from dnstest.test import Test t = Test() master = t.server("knot") zone = t.zone_rnd(1, records=200) t.link(zone, master) master.journal_max_usage = 51200 # the creation of NSEC3 tree fails on ESPACE t.start() master.zone_wait(zone) master.dnssec(zone).enable = True master.dnssec(zone).nsec3 = True master.gen_confile() master.reload() t.sleep(8) # unfixed knotd will crash upon update reversal master.flush(wait=True) t.end()