| repo_name (stringlengths 5–100) | ref (stringlengths 12–67) | path (stringlengths 4–244) | copies (stringlengths 1–8) | content (stringlengths 0–1.05M, ⌀) |
|---|---|---|---|---|
apark263/tensorflow
|
refs/heads/master
|
tensorflow/contrib/gan/python/losses/python/losses_wargs.py
|
73
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TFGAN grouped API. Please see README.md for details and usage."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.losses.python import losses_impl
from tensorflow.contrib.gan.python.losses.python.losses_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__, losses_impl.__all__)
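# A minimal usage sketch, assuming TensorFlow 1.x with tf.contrib available:
# the symbols re-exported by the wildcard import above surface under the
# tf.contrib.gan.losses.wargs namespace. The particular loss name below is
# assumed to be among the re-exported symbols and is only illustrative.
import tensorflow as tf

wargs = tf.contrib.gan.losses.wargs
generator_loss_fn = wargs.wasserstein_generator_loss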
|
illfelder/compute-image-packages
|
refs/heads/master
|
packages/python-google-compute-engine/google_compute_engine/networking/network_setup/__init__.py
|
12133432
| |
DBarthe/wikigraph
|
refs/heads/master
|
wikigraph/collector/__init__.py
|
12133432
| |
tundebabzy/frappe
|
refs/heads/develop
|
frappe/patches/v11_0/__init__.py
|
12133432
| |
arun6582/django
|
refs/heads/master
|
django/contrib/gis/db/backends/oracle/__init__.py
|
12133432
| |
hernad/frappe
|
refs/heads/develop
|
frappe/desk/doctype/__init__.py
|
12133432
| |
LWJensen/coala-bears
|
refs/heads/master
|
bears/markdown/__init__.py
|
12133432
| |
yewang15215/django
|
refs/heads/master
|
django/contrib/sites/migrations/__init__.py
|
12133432
| |
carrillo/scikit-learn
|
refs/heads/master
|
sklearn/datasets/tests/test_covtype.py
|
335
|
"""Test the covtype loader.
Skipped if covtype is not already downloaded to data_home.
"""
import errno
from sklearn.datasets import fetch_covtype
from sklearn.utils.testing import assert_equal, SkipTest
def fetch(*args, **kwargs):
return fetch_covtype(*args, download_if_missing=False, **kwargs)
def test_fetch():
try:
data1 = fetch(shuffle=True, random_state=42)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Covertype dataset can not be loaded.")
data2 = fetch(shuffle=True, random_state=37)
X1, X2 = data1['data'], data2['data']
assert_equal((581012, 54), X1.shape)
assert_equal(X1.shape, X2.shape)
assert_equal(X1.sum(), X2.sum())
y1, y2 = data1['target'], data2['target']
assert_equal((X1.shape[0],), y1.shape)
assert_equal((X1.shape[0],), y2.shape)
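# A minimal sketch, assuming scikit-learn is installed and the dataset can be
# downloaded: populating data_home once so the SkipTest branch above is not
# taken. download_if_missing defaults to True, unlike the fetch() wrapper above.
from sklearn.datasets import fetch_covtype

bunch = fetch_covtype(shuffle=True, random_state=42)
print(bunch.data.shape)    # (581012, 54), matching the assertion above
print(bunch.target.shape)  # (581012,)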
|
huchoi/edx-platform
|
refs/heads/master
|
cms/djangoapps/contentstore/views/checklist.py
|
5
|
import json
import copy
from util.json_request import JsonResponse
from django.http import HttpResponseBadRequest
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_http_methods
from django_future.csrf import ensure_csrf_cookie
from edxmako.shortcuts import render_to_response
from django.http import HttpResponseNotFound
from django.core.exceptions import PermissionDenied
from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore.django import modulestore
from contentstore.utils import reverse_course_url
from .access import has_course_access
from xmodule.course_module import CourseDescriptor
from django.utils.translation import ugettext
__all__ = ['checklists_handler']
# pylint: disable=unused-argument
@require_http_methods(("GET", "POST", "PUT"))
@login_required
@ensure_csrf_cookie
def checklists_handler(request, course_key_string, checklist_index=None):
"""
The restful handler for checklists.
GET
html: return html page for all checklists
json: return json representing all checklists. checklist_index is not supported for GET at this time.
POST or PUT
json: updates the checked state for items within a particular checklist. checklist_index is required.
"""
course_key = CourseKey.from_string(course_key_string)
if not has_course_access(request.user, course_key):
raise PermissionDenied()
course_module = modulestore().get_course(course_key)
json_request = 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json')
if request.method == 'GET':
# If course was created before checklists were introduced, copy them over
# from the template.
if not course_module.checklists:
course_module.checklists = CourseDescriptor.checklists.default
modulestore().update_item(course_module, request.user.id)
expanded_checklists = expand_all_action_urls(course_module)
if json_request:
return JsonResponse(expanded_checklists)
else:
handler_url = reverse_course_url('checklists_handler', course_key)
return render_to_response('checklists.html',
{
'handler_url': handler_url,
# context_course is used by analytics
'context_course': course_module,
'checklists': expanded_checklists
})
elif json_request:
# Can now assume POST or PUT because GET handled above.
if checklist_index is not None and 0 <= int(checklist_index) < len(course_module.checklists):
index = int(checklist_index)
persisted_checklist = course_module.checklists[index]
modified_checklist = json.loads(request.body)
# Only thing the user can modify is the "checked" state.
# We don't want to persist what comes back from the client because it will
# include the expanded action URLs (which are non-portable).
for item_index, item in enumerate(modified_checklist.get('items')):
persisted_checklist['items'][item_index]['is_checked'] = item['is_checked']
# seeming noop which triggers kvs to record that the metadata is
# not default
course_module.checklists = course_module.checklists
course_module.save()
modulestore().update_item(course_module, request.user.id)
expanded_checklist = expand_checklist_action_url(course_module, persisted_checklist)
return JsonResponse(localize_checklist_text(expanded_checklist))
else:
return HttpResponseBadRequest(
("Could not save checklist state because the checklist index "
"was out of range or unspecified."),
content_type="text/plain"
)
else:
return HttpResponseNotFound()
def expand_all_action_urls(course_module):
"""
Gets the checklists out of the course module and expands their action urls.
Returns a copy of the checklists with modified urls, without modifying the persisted version
of the checklists.
"""
expanded_checklists = []
for checklist in course_module.checklists:
expanded_checklists.append(localize_checklist_text(expand_checklist_action_url(course_module, checklist)))
return expanded_checklists
def expand_checklist_action_url(course_module, checklist):
"""
Expands the action URLs for a given checklist and returns the modified version.
The method makes a copy of the input checklist and does not modify the input argument.
"""
expanded_checklist = copy.deepcopy(checklist)
urlconf_map = {
"ManageUsers": "course_team_handler",
"CourseOutline": "course_handler",
"SettingsDetails": "settings_handler",
"SettingsGrading": "grading_handler",
}
for item in expanded_checklist.get('items'):
action_url = item.get('action_url')
if action_url in urlconf_map:
item['action_url'] = reverse_course_url(urlconf_map[action_url], course_module.id)
return expanded_checklist
def localize_checklist_text(checklist):
"""
Localize texts for a given checklist and returns the modified version.
The method does an in-place operation so the input checklist is modified directly.
"""
# Localize checklist name
checklist['short_description'] = ugettext(checklist['short_description'])
# Localize checklist items
for item in checklist.get('items'):
item['short_description'] = ugettext(item['short_description'])
item['long_description'] = ugettext(item['long_description'])
item['action_text'] = ugettext(item['action_text']) if item['action_text'] != "" else u""
return checklist
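# A hypothetical checklist entry illustrating the structure the handlers above
# read and write; the key names come from the code above, the strings are invented.
example_checklist = {
    'short_description': 'Getting Started With Studio',
    'items': [
        {
            'short_description': 'Add Course Team Members',
            'long_description': 'Invite additional staff to help author the course.',
            'is_checked': False,
            # Expanded into a real URL by expand_checklist_action_url via urlconf_map.
            'action_url': 'ManageUsers',
            'action_text': 'Edit Course Team',
        },
    ],
}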
|
ashepelev/TopologyWeigher
|
refs/heads/master
|
test_framework/testing.py
|
1
|
import BandwidthHistory
import Scheduler
__author__ = 'ash'
import yaml
import GraphDrawer
import TrafficGen
stream_nodes = file('topology-examples/nodes1.yaml','r')
stream_edges = file('topology-examples/edges1.yaml','r')
node_list = yaml.load(stream_nodes)
edge_list = yaml.load(stream_edges)
bwhist = BandwidthHistory.BandwidthHistory(node_list,edge_list)
trgen = TrafficGen.TrafficGen(node_list,bwhist)
trgen.generator()
stream_nodes.close()
stream_edges.close()
dist = Scheduler.Scheduler.build_distances(bwhist)
task = Scheduler.Task.example_task()
print "Appropriate node: " + str(Scheduler.Scheduler.schedule(dist,task,node_list))
gr = GraphDrawer.GraphDrawer(node_list,edge_list)
graph = gr.get_edges()
labels = gr.get_labels()
gr.draw_graph(graph,labels=labels, graph_layout='spring',draw_bandwidth='avg')
exit()
|
i945/An
|
refs/heads/master
|
An/extra_apps/xadmin/plugins/quickfilter.py
|
10
|
'''
Created on Mar 26, 2014
@author: LAB_ADM
'''
from future.utils import iteritems
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from xadmin.filters import manager,MultiSelectFieldListFilter
from xadmin.plugins.filters import *
from xadmin.util import is_related_field
@manager.register
class QuickFilterMultiSelectFieldListFilter(MultiSelectFieldListFilter):
""" Delegates the filter to the default filter and ors the results of each
Lists the distinct values of each field as a checkbox
Uses the default spec for each
"""
template = 'xadmin/filters/quickfilter.html'
class QuickFilterPlugin(BaseAdminPlugin):
""" Add a filter menu to the left column of the page """
list_quick_filter = () # these must be a subset of list_filter to work
quickfilter = {}
search_fields = ()
free_query_filter = True
def init_request(self, *args, **kwargs):
menu_style_accordian = hasattr(self.admin_view,'menu_style') and self.admin_view.menu_style == 'accordion'
return bool(self.list_quick_filter) and not menu_style_accordian
# Media
def get_media(self, media):
return media + self.vendor('xadmin.plugin.quickfilter.js','xadmin.plugin.quickfilter.css')
def lookup_allowed(self, lookup, value):
model = self.model
# Check FKey lookups that are allowed, so that popups produced by
# ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to,
# are allowed to work.
for l in model._meta.related_fkey_lookups:
for k, v in widgets.url_params_from_lookup_dict(l).items():
if k == lookup and v == value:
return True
parts = lookup.split(LOOKUP_SEP)
# Last term in lookup is a query term (__exact, __startswith etc)
# This term can be ignored.
if len(parts) > 1 and parts[-1] in QUERY_TERMS:
parts.pop()
# Special case -- foo__id__exact and foo__id queries are implied
# if foo has been specificially included in the lookup list; so
# drop __id if it is the last part. However, first we need to find
# the pk attribute name.
rel_name = None
for part in parts[:-1]:
try:
field = model._meta.get_field(part)
except FieldDoesNotExist:
# Lookups on non-existent fields are ok, since they're ignored
# later.
return True
if hasattr(field, 'rel'):
model = field.rel.to
rel_name = field.rel.get_related_field().name
elif is_related_field(field):
model = field.model
rel_name = model._meta.pk.name
else:
rel_name = None
if rel_name and len(parts) > 1 and parts[-1] == rel_name:
parts.pop()
if len(parts) == 1:
return True
clean_lookup = LOOKUP_SEP.join(parts)
return clean_lookup in self.list_quick_filter
def get_list_queryset(self, queryset):
lookup_params = dict([(smart_str(k)[len(FILTER_PREFIX):], v) for k, v in self.admin_view.params.items() if smart_str(k).startswith(FILTER_PREFIX) and v != ''])
for p_key, p_val in iteritems(lookup_params):
if p_val == "False":
lookup_params[p_key] = False
use_distinct = False
if not hasattr(self.admin_view,'quickfilter'):
self.admin_view.quickfilter = {}
# for clean filters
self.admin_view.quickfilter['has_query_param'] = bool(lookup_params)
self.admin_view.quickfilter['clean_query_url'] = self.admin_view.get_query_string(remove=[k for k in self.request.GET.keys() if k.startswith(FILTER_PREFIX)])
# Normalize the types of keys
if not self.free_query_filter:
for key, value in lookup_params.items():
if not self.lookup_allowed(key, value):
raise SuspiciousOperation("Filtering by %s not allowed" % key)
self.filter_specs = []
if self.list_quick_filter:
for list_quick_filter in self.list_quick_filter:
field_path = None
field_order_by = None
field_limit = None
field_parts = []
sort_key = None
cache_config = None
if type(list_quick_filter)==dict and 'field' in list_quick_filter:
field = list_quick_filter['field']
if 'order_by' in list_quick_filter:
field_order_by = list_quick_filter['order_by']
if 'limit' in list_quick_filter:
field_limit = list_quick_filter['limit']
if 'sort' in list_quick_filter and callable(list_quick_filter['sort']):
sort_key = list_quick_filter['sort']
if 'cache' in list_quick_filter and type(list_quick_filter)==dict:
cache_config = list_quick_filter['cache']
else:
field = list_quick_filter # This plugin only uses MultiselectFieldListFilter
if not isinstance(field, models.Field):
field_path = field
field_parts = get_fields_from_path(self.model, field_path)
field = field_parts[-1]
spec = QuickFilterMultiSelectFieldListFilter(field, self.request, lookup_params,self.model, self.admin_view, field_path=field_path,field_order_by=field_order_by,field_limit=field_limit,sort_key=sort_key,cache_config=cache_config)
if len(field_parts)>1:
spec.title = "%s %s"%(field_parts[-2].name,spec.title)
# Check if we need to use distinct()
use_distinct = True  # (use_distinct or lookup_needs_distinct(self.opts, field_path))
if spec and spec.has_output():
try:
new_qs = spec.do_filte(queryset)
except ValidationError as e:
new_qs = None
self.admin_view.message_user(_("<b>Filtering error:</b> %s") % e.messages[0], 'error')
if new_qs is not None:
queryset = new_qs
self.filter_specs.append(spec)
self.has_filters = bool(self.filter_specs)
self.admin_view.quickfilter['filter_specs'] = self.filter_specs
obj = filter(lambda f: f.is_used, self.filter_specs)
if six.PY3:
obj = list(obj)
self.admin_view.quickfilter['used_filter_num'] = len(obj)
if use_distinct:
return queryset.distinct()
else:
return queryset
def block_left_navbar(self, context, nodes):
nodes.append(loader.render_to_string('xadmin/blocks/modal_list.left_navbar.quickfilter.html',
get_context_dict(context)))
site.register_plugin(QuickFilterPlugin, ListAdminView)
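# A hypothetical sketch of turning the plugin above on from an xadmin options
# class; the model and field names are invented. list_quick_filter must be a
# subset of list_filter, and each entry may be a field name or a dict with
# 'field' plus optional 'limit', 'order_by', 'sort' and 'cache' keys, as
# handled in get_list_queryset above.
class ArticleAdmin(object):
    list_display = ('title', 'status', 'category')
    list_filter = ('status', 'category')
    list_quick_filter = ('status', {'field': 'category', 'limit': 10})
# Registered the usual xadmin way, e.g. xadmin.site.register(Article, ArticleAdmin)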
|
klmitch/nova
|
refs/heads/master
|
nova/tests/functional/compute/test_instance_list.py
|
5
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_utils.fixture import uuidsentinel as uuids
from nova.compute import instance_list
from nova import context
from nova.db import api as db
from nova import exception
from nova import objects
from nova import test
class InstanceListTestCase(test.TestCase):
NUMBER_OF_CELLS = 3
def setUp(self):
super(InstanceListTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.num_instances = 3
self.instances = []
start = datetime.datetime(1985, 10, 25, 1, 21, 0)
dt = start
spread = datetime.timedelta(minutes=10)
self.cells = objects.CellMappingList.get_all(self.context)
# Create three instances in each of the real cells. Leave the
# first cell empty to make sure we don't break with an empty
# one.
for cell in self.cells[1:]:
for i in range(0, self.num_instances):
with context.target_cell(self.context, cell) as cctx:
inst = objects.Instance(
context=cctx,
project_id=self.context.project_id,
user_id=self.context.user_id,
created_at=start,
launched_at=dt,
instance_type_id=i,
hostname='%s-inst%i' % (cell.name, i))
inst.create()
if i % 2 == 0:
# Make some faults for this instance
for n in range(0, i + 1):
msg = 'fault%i-%s' % (n, inst.hostname)
f = objects.InstanceFault(context=cctx,
instance_uuid=inst.uuid,
code=i,
message=msg,
details='fake',
host='fakehost')
f.create()
self.instances.append(inst)
im = objects.InstanceMapping(context=self.context,
project_id=inst.project_id,
user_id=inst.user_id,
instance_uuid=inst.uuid,
cell_mapping=cell)
im.create()
dt += spread
def test_get_sorted(self):
filters = {}
limit = None
marker = None
columns = []
sort_keys = ['uuid']
sort_dirs = ['asc']
obj, insts = instance_list.get_instances_sorted(self.context, filters,
limit, marker, columns,
sort_keys, sort_dirs)
uuids = [inst['uuid'] for inst in insts]
self.assertEqual(sorted(uuids), uuids)
self.assertEqual(len(self.instances), len(uuids))
def test_get_sorted_descending(self):
filters = {}
limit = None
marker = None
columns = []
sort_keys = ['uuid']
sort_dirs = ['desc']
obj, insts = instance_list.get_instances_sorted(self.context, filters,
limit, marker, columns,
sort_keys, sort_dirs)
uuids = [inst['uuid'] for inst in insts]
self.assertEqual(list(reversed(sorted(uuids))), uuids)
self.assertEqual(len(self.instances), len(uuids))
def test_get_sorted_with_filter(self):
filters = {'instance_type_id': 1}
limit = None
marker = None
columns = []
sort_keys = ['uuid']
sort_dirs = ['asc']
obj, insts = instance_list.get_instances_sorted(self.context, filters,
limit, marker, columns,
sort_keys, sort_dirs)
uuids = [inst['uuid'] for inst in insts]
expected = [inst['uuid'] for inst in self.instances
if inst['instance_type_id'] == 1]
self.assertEqual(list(sorted(expected)), uuids)
def test_get_sorted_by_defaults(self):
filters = {}
limit = None
marker = None
columns = []
sort_keys = None
sort_dirs = None
obj, insts = instance_list.get_instances_sorted(self.context, filters,
limit, marker, columns,
sort_keys, sort_dirs)
uuids = set([inst['uuid'] for inst in insts])
expected = set([inst['uuid'] for inst in self.instances])
self.assertEqual(expected, uuids)
def test_get_sorted_with_limit(self):
obj, insts = instance_list.get_instances_sorted(self.context, {},
5, None,
[], ['uuid'], ['asc'])
uuids = [inst['uuid'] for inst in insts]
had_uuids = [inst.uuid for inst in self.instances]
self.assertEqual(sorted(had_uuids)[:5], uuids)
self.assertEqual(5, len(uuids))
def test_get_sorted_with_large_limit(self):
obj, insts = instance_list.get_instances_sorted(self.context, {},
5000, None,
[], ['uuid'], ['asc'])
uuids = [inst['uuid'] for inst in insts]
self.assertEqual(sorted(uuids), uuids)
self.assertEqual(len(self.instances), len(uuids))
def test_get_sorted_with_large_limit_batched(self):
obj, insts = instance_list.get_instances_sorted(self.context, {},
5000, None,
[], ['uuid'], ['asc'],
batch_size=2)
uuids = [inst['uuid'] for inst in insts]
self.assertEqual(sorted(uuids), uuids)
self.assertEqual(len(self.instances), len(uuids))
def _test_get_sorted_with_limit_marker(self, sort_by, pages=2, pagesize=2,
sort_dir='asc'):
"""Get multiple pages by a sort key and validate the results.
This requests $pages of $pagesize, followed by a final page with
no limit, and a final-final page which should be empty. It validates
that we got a consistent set of results no matter where the page
boundary is, that we got all the results after the unlimited query,
and that the final page comes back empty when we use the last
instance as a marker.
"""
insts = []
page = 0
while True:
if page >= pages:
# We've requested the specified number of limited (by pagesize)
# pages, so request a penultimate page with no limit which
# should always finish out the result.
limit = None
else:
# Request a limited-size page for the first $pages pages.
limit = pagesize
if insts:
# If we're not on the first page, use the last instance we
# received as the marker
marker = insts[-1]['uuid']
else:
# No marker for the first page
marker = None
batch = list(
instance_list.get_instances_sorted(self.context, {},
limit, marker,
[], [sort_by],
[sort_dir])[1])
if not batch:
# This should only happen when we've pulled the last empty
# page because we used the marker of the last instance. If
# we end up with a non-deterministic ordering, we'd loop
# forever.
break
insts.extend(batch)
page += 1
if page > len(self.instances) * 2:
# Do this sanity check in case we introduce (or find) another
# repeating page bug like #1721791. Without this we loop
# until timeout, which is less obvious.
raise Exception('Infinite paging loop')
# We should have requested exactly (or one more unlimited) pages
self.assertIn(page, (pages, pages + 1))
# Make sure the full set matches what we know to be true
found = [x[sort_by] for x in insts]
had = [x[sort_by] for x in self.instances]
if sort_by in ('launched_at', 'created_at'):
# We're comparing objects and database entries, so we need to
# squash the tzinfo of the object ones so we can compare
had = [x.replace(tzinfo=None) for x in had]
self.assertEqual(len(had), len(found))
if sort_dir == 'asc':
self.assertEqual(sorted(had), found)
else:
self.assertEqual(list(reversed(sorted(had))), found)
def test_get_sorted_with_limit_marker_stable(self):
"""Test sorted by hostname.
This will be a stable sort that won't change on each run.
"""
self._test_get_sorted_with_limit_marker(sort_by='hostname')
def test_get_sorted_with_limit_marker_stable_reverse(self):
"""Test sorted by hostname.
This will be a stable sort that won't change on each run.
"""
self._test_get_sorted_with_limit_marker(sort_by='hostname',
sort_dir='desc')
def test_get_sorted_with_limit_marker_stable_different_pages(self):
"""Test sorted by hostname with different page sizes.
Just do the above with page seams in different places.
"""
self._test_get_sorted_with_limit_marker(sort_by='hostname',
pages=3, pagesize=1)
def test_get_sorted_with_limit_marker_stable_different_pages_reverse(self):
"""Test sorted by hostname with different page sizes.
Just do the above with page seams in different places.
"""
self._test_get_sorted_with_limit_marker(sort_by='hostname',
pages=3, pagesize=1,
sort_dir='desc')
def test_get_sorted_with_limit_marker_random(self):
"""Test sorted by uuid.
This will not be stable and the actual ordering will depend on
uuid generation and thus be different on each run. Do this in
addition to the stable sort above to keep us honest.
"""
self._test_get_sorted_with_limit_marker(sort_by='uuid')
def test_get_sorted_with_limit_marker_random_different_pages(self):
"""Test sorted by uuid with different page sizes.
Just do the above with page seams in different places.
"""
self._test_get_sorted_with_limit_marker(sort_by='uuid',
pages=3, pagesize=2)
def test_get_sorted_with_limit_marker_datetime(self):
"""Test sorted by launched_at.
This tests that we can do all of this, but with datetime
fields.
"""
self._test_get_sorted_with_limit_marker(sort_by='launched_at')
def test_get_sorted_with_limit_marker_datetime_same(self):
"""Test sorted by created_at.
This tests that we can do all of this, but with datetime
fields that are identical.
"""
self._test_get_sorted_with_limit_marker(sort_by='created_at')
def test_get_sorted_with_deleted_marker(self):
marker = self.instances[1]['uuid']
before = list(
instance_list.get_instances_sorted(self.context, {},
None, marker,
[], None, None)[1])
db.instance_destroy(self.context, marker)
after = list(
instance_list.get_instances_sorted(self.context, {},
None, marker,
[], None, None)[1])
self.assertEqual(before, after)
def test_get_sorted_with_invalid_marker(self):
self.assertRaises(exception.MarkerNotFound,
list, instance_list.get_instances_sorted(
self.context, {}, None, 'not-a-marker',
[], None, None)[1])
def test_get_sorted_with_purged_instance(self):
"""Test that we handle a mapped but purged instance."""
im = objects.InstanceMapping(self.context,
instance_uuid=uuids.missing,
project_id=self.context.project_id,
user_id=self.context.user_id,
cell=self.cells[0])
im.create()
self.assertRaises(exception.MarkerNotFound,
list, instance_list.get_instances_sorted(
self.context, {}, None, uuids.missing,
[], None, None)[1])
def _test_get_paginated_with_filter(self, filters):
found_uuids = []
marker = None
while True:
# Query for those instances, sorted by a different key in
# pages of one until we've consumed them all
batch = list(
instance_list.get_instances_sorted(self.context,
filters,
1, marker, [],
['hostname'],
['asc'])[1])
if not batch:
break
found_uuids.extend([x['uuid'] for x in batch])
marker = found_uuids[-1]
return found_uuids
def test_get_paginated_with_uuid_filter(self):
"""Test getting pages with uuid filters.
This runs through the results of a uuid-filtered query in pages of
length one to ensure that we land on markers that are filtered out
of the query and are not accidentally returned.
"""
# Pick a set of the instances by uuid, when sorted by uuid
all_uuids = [x['uuid'] for x in self.instances]
filters = {'uuid': sorted(all_uuids)[:7]}
found_uuids = self._test_get_paginated_with_filter(filters)
# Make sure we found all (and only) the instances we asked for
self.assertEqual(set(found_uuids), set(filters['uuid']))
self.assertEqual(7, len(found_uuids))
def test_get_paginated_with_other_filter(self):
"""Test getting pages with another filter.
This runs through the results of a filtered query in pages of
length one to ensure we land on markers that are filtered out
of the query and are not accidentally returned.
"""
expected = [inst['uuid'] for inst in self.instances
if inst['instance_type_id'] == 1]
filters = {'instance_type_id': 1}
found_uuids = self._test_get_paginated_with_filter(filters)
self.assertEqual(set(expected), set(found_uuids))
def test_get_paginated_with_uuid_and_other_filter(self):
"""Test getting pages with a uuid and other type of filter.
We do this to make sure that we still find (but exclude) the
marker even if one of the other filters would have included
it.
"""
# Pick a set of the instances by uuid, when sorted by uuid
all_uuids = [x['uuid'] for x in self.instances]
filters = {'uuid': sorted(all_uuids)[:7],
'user_id': 'fake'}
found_uuids = self._test_get_paginated_with_filter(filters)
# Make sure we found all (and only) the instances we asked for
self.assertEqual(set(found_uuids), set(filters['uuid']))
self.assertEqual(7, len(found_uuids))
def test_get_sorted_with_faults(self):
"""Make sure we get faults when we ask for them."""
insts = list(
instance_list.get_instances_sorted(self.context, {},
None, None,
['fault'],
['hostname'], ['asc'])[1])
# Two of the instances in each cell have faults (0th and 2nd)
expected_faults = self.NUMBER_OF_CELLS * 2
expected_no_fault = len(self.instances) - expected_faults
faults = [inst['fault'] for inst in insts]
self.assertEqual(expected_no_fault, faults.count(None))
def test_get_sorted_paginated_with_faults(self):
"""Get pages of one with faults.
Do this specifically so we make sure we land on faulted marker
instances to ensure we don't omit theirs.
"""
insts = []
while True:
if insts:
marker = insts[-1]['uuid']
else:
marker = None
batch = list(
instance_list.get_instances_sorted(self.context, {},
1, marker,
['fault'],
['hostname'], ['asc'])[1])
if not batch:
break
insts.extend(batch)
self.assertEqual(len(self.instances), len(insts))
# Two of the instances in each cell have faults (0th and 2nd)
expected_faults = self.NUMBER_OF_CELLS * 2
expected_no_fault = len(self.instances) - expected_faults
faults = [inst['fault'] for inst in insts]
self.assertEqual(expected_no_fault, faults.count(None))
def test_instance_list_minimal_cells(self):
"""Get a list of instances with a subset of cell mappings."""
last_cell = self.cells[-1]
with context.target_cell(self.context, last_cell) as cctxt:
last_cell_instances = db.instance_get_all(cctxt)
last_cell_uuids = [inst['uuid'] for inst in last_cell_instances]
instances = list(
instance_list.get_instances_sorted(self.context, {},
None, None, [],
['uuid'], ['asc'],
cell_mappings=self.cells[:-1])
[1])
found_uuids = [inst['hostname'] for inst in instances]
had_uuids = [inst['hostname'] for inst in self.instances
if inst['uuid'] not in last_cell_uuids]
self.assertEqual(sorted(had_uuids), sorted(found_uuids))
class TestInstanceListObjects(test.TestCase):
def setUp(self):
super(TestInstanceListObjects, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.num_instances = 3
self.instances = []
start = datetime.datetime(1985, 10, 25, 1, 21, 0)
dt = start
spread = datetime.timedelta(minutes=10)
cells = objects.CellMappingList.get_all(self.context)
# Create three instances in each of the real cells. Leave the
# first cell empty to make sure we don't break with an empty
# one
for cell in cells[1:]:
for i in range(0, self.num_instances):
with context.target_cell(self.context, cell) as cctx:
inst = objects.Instance(
context=cctx,
project_id=self.context.project_id,
user_id=self.context.user_id,
created_at=start,
launched_at=dt,
instance_type_id=i,
hostname='%s-inst%i' % (cell.name, i))
inst.create()
if i % 2 == 0:
# Make some faults for this instance
for n in range(0, i + 1):
msg = 'fault%i-%s' % (n, inst.hostname)
f = objects.InstanceFault(context=cctx,
instance_uuid=inst.uuid,
code=i,
message=msg,
details='fake',
host='fakehost')
f.create()
self.instances.append(inst)
im = objects.InstanceMapping(context=self.context,
project_id=inst.project_id,
user_id=inst.user_id,
instance_uuid=inst.uuid,
cell_mapping=cell)
im.create()
dt += spread
def test_get_instance_objects_sorted(self):
filters = {}
limit = None
marker = None
expected_attrs = []
sort_keys = ['uuid']
sort_dirs = ['asc']
insts, down_cell_uuids = instance_list.get_instance_objects_sorted(
self.context, filters, limit, marker, expected_attrs,
sort_keys, sort_dirs)
found_uuids = [x.uuid for x in insts]
had_uuids = sorted([x['uuid'] for x in self.instances])
self.assertEqual(had_uuids, found_uuids)
# Make sure none of the instances have fault set
self.assertEqual(0, len([inst for inst in insts
if 'fault' in inst]))
def test_get_instance_objects_sorted_with_fault(self):
filters = {}
limit = None
marker = None
expected_attrs = ['fault']
sort_keys = ['uuid']
sort_dirs = ['asc']
insts, down_cell_uuids = instance_list.get_instance_objects_sorted(
self.context, filters, limit, marker, expected_attrs,
sort_keys, sort_dirs)
found_uuids = [x.uuid for x in insts]
had_uuids = sorted([x['uuid'] for x in self.instances])
self.assertEqual(had_uuids, found_uuids)
# They should all have fault set, but only some have
# actual faults
self.assertEqual(2, len([inst for inst in insts
if inst.fault]))
def test_get_instance_objects_sorted_paged(self):
"""Query a full first page and ensure an empty second one.
This uses created_at which is enforced to be the same across
each instance by setUp(). This will help make sure we still
have a stable ordering, even when we only claim to care about
created_at.
"""
instp1, down_cell_uuids = instance_list.get_instance_objects_sorted(
self.context, {}, None, None, [],
['created_at'], ['asc'])
self.assertEqual(len(self.instances), len(instp1))
instp2, down_cell_uuids = instance_list.get_instance_objects_sorted(
self.context, {}, None, instp1[-1]['uuid'], [],
['created_at'], ['asc'])
self.assertEqual(0, len(instp2))
|
OpenFAST/r-test
|
refs/heads/f/driver
|
updateBaselineSolutions.py
|
1
|
#
# Copyright 2017 National Renewable Energy Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This program copies locally generated solutions into the appropriate machine- and
compiler-specific directory to update the baseline solutions.
Usage: python updateBaselineSolutions.py input_case_list source_solution_parent target_solution_parent system_name compiler_id
input_case_list - a text file listing all the cases to copy
source_solution_parent - the location to copy the files from. this is the parent directory of the cases; for example, `openfast/build/reg_tests/openfast`
target_solution_parent - the location to copy the files to. this is the parent directory of the target cases; for example, `openfast/reg_tests/r-test/openfast`
Example: python updateBaselineSolutions.py caselist.txt source/solution/parent target/solution/parent [macos,linux,windows] [intel,gnu]
"""
import sys
import os
import shutil
##### Helper functions
def exitWithError(error):
print(error)
sys.exit(1)
def exitWithDirNotFound(dir):
exitWithError("Directory does not exist: {}\n".format(dir))
##### Main
### Verify input arguments
if len(sys.argv) != 6:
exitWithError("Invalid arguments: {}\n".format(" ".join(sys.argv)) +
"Usage: python updateBaselineSolutions.py caselist.txt local/solution/parent baseline/solution/parent [macos,linux,windows] [intel,gnu]")
with open(sys.argv[1]) as listfile:
content = listfile.readlines()
# allow comments with '#'
casenames = [x.rstrip("\n\r").strip() for x in content if "#" not in x]
# allow empty lines
casenames = [x for x in casenames if len(x.strip()) > 0]
sourceParent = sys.argv[2]
targetParent = sys.argv[3]
machine = sys.argv[4]
compiler = sys.argv[5]
for case in casenames:
# verify source directory exists. if not, bail
if not os.path.isdir(sourceParent):
exitWithDirNotFound(sourceParent)
# verify destination directory exists. if not, make it
destinationDir = os.path.join(targetParent, case, "{}-{}".format(machine, compiler))
if not os.path.isdir(destinationDir):
os.makedirs(destinationDir)
caseDir = os.path.join(sourceParent, case)
sourceFiles = os.listdir(caseDir)
if "linear" in case.lower():
targetExtensions = [".lin"]
else:
targetExtensions = [".out", ".outb", ".sum"]
targetExtensions += [".log"]
targetFiles = [s for s in sourceFiles for t in targetExtensions if t in s]
for f in targetFiles:
shutil.copyfile(os.path.join(caseDir, f), os.path.join(destinationDir,f))
|
mistercrunch/airflow
|
refs/heads/master
|
airflow/hooks/dbapi.py
|
5
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from contextlib import closing
from datetime import datetime
from typing import Any, Optional
from urllib.parse import quote_plus
from sqlalchemy import create_engine
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.typing_compat import Protocol
class ConnectorProtocol(Protocol):
"""A protocol where you can connect to a database."""
def connect(self, host: str, port: int, username: str, schema: str) -> Any:
"""
Connect to a database.
:param host: The database host to connect to.
:param port: The database port to connect to.
:param username: The database username used for the authentication.
:param schema: The database schema to connect to.
:return: the authorized connection object.
"""
class DbApiHook(BaseHook):
"""Abstract base class for sql hooks."""
# Override to provide the connection name.
conn_name_attr = None # type: str
# Override to have a default connection id for a particular dbHook
default_conn_name = 'default_conn_id'
# Override if this db supports autocommit.
supports_autocommit = False
# Override with the object that exposes the connect method
connector = None # type: Optional[ConnectorProtocol]
def __init__(self, *args, **kwargs):
super().__init__()
if not self.conn_name_attr:
raise AirflowException("conn_name_attr is not defined")
elif len(args) == 1:
setattr(self, self.conn_name_attr, args[0])
elif self.conn_name_attr not in kwargs:
setattr(self, self.conn_name_attr, self.default_conn_name)
else:
setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])
def get_conn(self):
"""Returns a connection object"""
db = self.get_connection(getattr(self, self.conn_name_attr))
return self.connector.connect(host=db.host, port=db.port, username=db.login, schema=db.schema)
def get_uri(self) -> str:
"""
Extract the URI from the connection.
:return: the extracted uri.
"""
conn = self.get_connection(getattr(self, self.conn_name_attr))
login = ''
if conn.login:
login = f'{quote_plus(conn.login)}:{quote_plus(conn.password)}@'
host = conn.host
if conn.port is not None:
host += f':{conn.port}'
uri = f'{conn.conn_type}://{login}{host}/'
if conn.schema:
uri += conn.schema
return uri
def get_sqlalchemy_engine(self, engine_kwargs=None):
"""
Get an sqlalchemy_engine object.
:param engine_kwargs: Kwargs used in :func:`~sqlalchemy.create_engine`.
:return: the created engine.
"""
if engine_kwargs is None:
engine_kwargs = {}
return create_engine(self.get_uri(), **engine_kwargs)
def get_pandas_df(self, sql, parameters=None, **kwargs):
"""
Executes the sql and returns a pandas dataframe
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: dict or iterable
:param kwargs: (optional) passed into pandas.io.sql.read_sql method
:type kwargs: dict
"""
from pandas.io import sql as psql
with closing(self.get_conn()) as conn:
return psql.read_sql(sql, con=conn, params=parameters, **kwargs)
def get_records(self, sql, parameters=None):
"""
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: dict or iterable
"""
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchall()
def get_first(self, sql, parameters=None):
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: dict or iterable
"""
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchone()
def run(self, sql, autocommit=False, parameters=None):
"""
Runs a command or a list of commands. Pass a list of sql
statements to the sql parameter to get them to execute
sequentially
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param autocommit: What to set the connection's autocommit setting to
before executing the query.
:type autocommit: bool
:param parameters: The parameters to render the SQL query with.
:type parameters: dict or iterable
"""
if isinstance(sql, str):
sql = [sql]
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, autocommit)
with closing(conn.cursor()) as cur:
for sql_statement in sql:
self.log.info("Running statement: %s, parameters: %s", sql_statement, parameters)
if parameters:
cur.execute(sql_statement, parameters)
else:
cur.execute(sql_statement)
if hasattr(cur, 'rowcount'):
self.log.info("Rows affected: %s", cur.rowcount)
# If autocommit was set to False for db that supports autocommit,
# or if db does not support autocommit, we do a manual commit.
if not self.get_autocommit(conn):
conn.commit()
def set_autocommit(self, conn, autocommit):
"""Sets the autocommit flag on the connection"""
if not self.supports_autocommit and autocommit:
self.log.warning(
"%s connection doesn't support autocommit but autocommit activated.",
getattr(self, self.conn_name_attr),
)
conn.autocommit = autocommit
def get_autocommit(self, conn):
"""
Get autocommit setting for the provided connection.
Return True if conn.autocommit is set to True.
Return False if conn.autocommit is not set or set to False or conn
does not support autocommit.
:param conn: Connection to get autocommit setting from.
:type conn: connection object.
:return: connection autocommit setting.
:rtype: bool
"""
return getattr(conn, 'autocommit', False) and self.supports_autocommit
def get_cursor(self):
"""Returns a cursor"""
return self.get_conn().cursor()
@staticmethod
def _generate_insert_sql(table, values, target_fields, replace, **kwargs):
"""
Static helper method that generates the INSERT SQL statement.
The REPLACE variant is specific to MySQL syntax.
:param table: Name of the target table
:type table: str
:param values: The row to insert into the table
:type values: tuple of cell values
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param replace: Whether to replace instead of insert
:type replace: bool
:return: The generated INSERT or REPLACE SQL statement
:rtype: str
"""
placeholders = [
"%s",
] * len(values)
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = f"({target_fields})"
else:
target_fields = ''
if not replace:
sql = "INSERT INTO "
else:
sql = "REPLACE INTO "
sql += "{} {} VALUES ({})".format(table, target_fields, ",".join(placeholders))
return sql
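# For illustration (table and values invented), calling the helper above as
# DbApiHook._generate_insert_sql('users', ('Alice', 1), ['name', 'active'], False)
# yields: INSERT INTO users (name, active) VALUES (%s,%s)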
def insert_rows(self, table, rows, target_fields=None, commit_every=1000, replace=False, **kwargs):
"""
A generic way to insert a set of tuples into a table;
a new transaction is created every commit_every rows
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
:param replace: Whether to replace instead of insert
:type replace: bool
"""
i = 0
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, False)
conn.commit()
with closing(conn.cursor()) as cur:
for i, row in enumerate(rows, 1):
lst = []
for cell in row:
lst.append(self._serialize_cell(cell, conn))
values = tuple(lst)
sql = self._generate_insert_sql(table, values, target_fields, replace, **kwargs)
cur.execute(sql, values)
if commit_every and i % commit_every == 0:
conn.commit()
self.log.info("Loaded %s rows into %s so far", i, table)
conn.commit()
self.log.info("Done loading. Loaded a total of %s rows", i)
@staticmethod
def _serialize_cell(cell, conn=None): # pylint: disable=unused-argument
"""
Returns the SQL literal of the cell as a string.
:param cell: The cell to insert into the table
:type cell: object
:param conn: The database connection
:type conn: connection object
:return: The serialized cell
:rtype: str
"""
if cell is None:
return None
if isinstance(cell, datetime):
return cell.isoformat()
return str(cell)
def bulk_dump(self, table, tmp_file):
"""
Dumps a database table into a tab-delimited file
:param table: The name of the source table
:type table: str
:param tmp_file: The path of the target file
:type tmp_file: str
"""
raise NotImplementedError()
def bulk_load(self, table, tmp_file):
"""
Loads a tab-delimited file into a database table
:param table: The name of the target table
:type table: str
:param tmp_file: The path of the file to load into the table
:type tmp_file: str
"""
raise NotImplementedError()
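# A minimal sketch (hypothetical, not an actual Airflow provider) of a concrete
# hook built on the abstract base class above; the class name and connection id
# are invented, and sqlite3 merely stands in for a real DB driver module.
import sqlite3

class SqliteLikeHook(DbApiHook):
    conn_name_attr = 'sqlite_conn_id'     # attribute that holds the connection id
    default_conn_name = 'sqlite_default'  # used when no id is passed to __init__
    supports_autocommit = False

    def get_conn(self):
        # For sqlite-style connections Airflow stores the file path in `host`.
        airflow_conn = self.get_connection(getattr(self, self.conn_name_attr))
        return sqlite3.connect(airflow_conn.host)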
|
rdeheele/odoo
|
refs/heads/master
|
addons/auth_openid/controllers/main.py
|
382
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import os
import tempfile
import getpass
import werkzeug.urls
import werkzeug.exceptions
from openid import oidutil
from openid.store import filestore
from openid.consumer import consumer
from openid.cryptutil import randomString
from openid.extensions import ax, sreg
import openerp
from openerp import SUPERUSER_ID
from openerp.modules.registry import RegistryManager
from openerp.addons.web.controllers.main import login_and_redirect, set_cookie_and_redirect
import openerp.http as http
from openerp.http import request
from .. import utils
_logger = logging.getLogger(__name__)
oidutil.log = _logger.debug
def get_system_user():
"""Return system user info string, such as USERNAME-EUID"""
try:
info = getpass.getuser()
except ImportError:
if os.name == 'nt':
# when there is no 'USERNAME' in environment, getpass.getuser()
# fails when trying to import the 'pwd' module, which is unix only.
# In that case we have to fallback to real win32 API.
import win32api
info = win32api.GetUserName()
else:
raise
euid = getattr(os, 'geteuid', None) # Non available on some platforms
if euid is not None:
info = '%s-%d' % (info, euid())
return info
_storedir = os.path.join(tempfile.gettempdir(),
'openerp-auth_openid-%s-store' % get_system_user())
class GoogleAppsAwareConsumer(consumer.GenericConsumer):
def complete(self, message, endpoint, return_to):
if message.getOpenIDNamespace() == consumer.OPENID2_NS:
server_url = message.getArg(consumer.OPENID2_NS, 'op_endpoint', '')
if server_url.startswith('https://www.google.com/a/'):
assoc_handle = message.getArg(consumer.OPENID_NS, 'assoc_handle')
assoc = self.store.getAssociation(server_url, assoc_handle)
if assoc:
# update fields
for attr in ['claimed_id', 'identity']:
value = message.getArg(consumer.OPENID2_NS, attr, '')
value = 'https://www.google.com/accounts/o8/user-xrds?uri=%s' % werkzeug.url_quote_plus(value)
message.setArg(consumer.OPENID2_NS, attr, value)
# now, resign the message
message.delArg(consumer.OPENID2_NS, 'sig')
message.delArg(consumer.OPENID2_NS, 'signed')
message = assoc.signMessage(message)
return super(GoogleAppsAwareConsumer, self).complete(message, endpoint, return_to)
class OpenIDController(http.Controller):
_store = filestore.FileOpenIDStore(_storedir)
_REQUIRED_ATTRIBUTES = ['email']
_OPTIONAL_ATTRIBUTES = 'nickname fullname postcode country language timezone'.split()
def _add_extensions(self, oidrequest):
"""Add extensions to the oidrequest"""
sreg_request = sreg.SRegRequest(required=self._REQUIRED_ATTRIBUTES,
optional=self._OPTIONAL_ATTRIBUTES)
oidrequest.addExtension(sreg_request)
ax_request = ax.FetchRequest()
for alias in self._REQUIRED_ATTRIBUTES:
uri = utils.SREG2AX[alias]
ax_request.add(ax.AttrInfo(uri, required=True, alias=alias))
for alias in self._OPTIONAL_ATTRIBUTES:
uri = utils.SREG2AX[alias]
ax_request.add(ax.AttrInfo(uri, required=False, alias=alias))
oidrequest.addExtension(ax_request)
def _get_attributes_from_success_response(self, success_response):
attrs = {}
all_attrs = self._REQUIRED_ATTRIBUTES + self._OPTIONAL_ATTRIBUTES
sreg_resp = sreg.SRegResponse.fromSuccessResponse(success_response)
if sreg_resp:
for attr in all_attrs:
value = sreg_resp.get(attr)
if value is not None:
attrs[attr] = value
ax_resp = ax.FetchResponse.fromSuccessResponse(success_response)
if ax_resp:
for attr in all_attrs:
value = ax_resp.getSingle(utils.SREG2AX[attr])
if value is not None:
attrs[attr] = value
return attrs
def _get_realm(self):
return request.httprequest.host_url
@http.route('/auth_openid/login/verify_direct', type='http', auth='none')
def verify_direct(self, db, url):
result = self._verify(db, url)
if 'error' in result:
return werkzeug.exceptions.BadRequest(result['error'])
if result['action'] == 'redirect':
return werkzeug.utils.redirect(result['value'])
return result['value']
@http.route('/auth_openid/login/verify', type='json', auth='none')
def verify(self, db, url):
return self._verify(db, url)
def _verify(self, db, url):
redirect_to = werkzeug.urls.Href(request.httprequest.host_url + 'auth_openid/login/process')(session_id=request.session_id)
realm = self._get_realm()
session = dict(dbname=db, openid_url=url) # TODO add origin page ?
oidconsumer = consumer.Consumer(session, self._store)
try:
oidrequest = oidconsumer.begin(url)
except consumer.DiscoveryFailure, exc:
fetch_error_string = 'Error in discovery: %s' % (str(exc[0]),)
return {'error': fetch_error_string, 'title': 'OpenID Error'}
if oidrequest is None:
return {'error': 'No OpenID services found', 'title': 'OpenID Error'}
request.session.openid_session = session
self._add_extensions(oidrequest)
if oidrequest.shouldSendRedirect():
redirect_url = oidrequest.redirectURL(realm, redirect_to)
return {'action': 'redirect', 'value': redirect_url, 'session_id': request.session_id}
else:
form_html = oidrequest.htmlMarkup(realm, redirect_to)
return {'action': 'post', 'value': form_html, 'session_id': request.session_id}
@http.route('/auth_openid/login/process', type='http', auth='none')
def process(self, **kw):
session = getattr(request.session, 'openid_session', None)
if not session:
return set_cookie_and_redirect('/')
oidconsumer = consumer.Consumer(session, self._store, consumer_class=GoogleAppsAwareConsumer)
query = request.httprequest.args
info = oidconsumer.complete(query, request.httprequest.base_url)
display_identifier = info.getDisplayIdentifier()
session['status'] = info.status
if info.status == consumer.SUCCESS:
dbname = session['dbname']
registry = RegistryManager.get(dbname)
with registry.cursor() as cr:
Modules = registry.get('ir.module.module')
installed = Modules.search_count(cr, SUPERUSER_ID, ['&', ('name', '=', 'auth_openid'), ('state', '=', 'installed')]) == 1
if installed:
Users = registry.get('res.users')
#openid_url = info.endpoint.canonicalID or display_identifier
openid_url = session['openid_url']
attrs = self._get_attributes_from_success_response(info)
attrs['openid_url'] = openid_url
session['attributes'] = attrs
openid_email = attrs.get('email', False)
domain = []
if openid_email:
domain += ['|', ('openid_email', '=', False)]
domain += [('openid_email', '=', openid_email)]
domain += [('openid_url', '=', openid_url), ('active', '=', True)]
ids = Users.search(cr, SUPERUSER_ID, domain)
assert len(ids) < 2
if ids:
user_id = ids[0]
login = Users.browse(cr, SUPERUSER_ID, user_id).login
key = randomString(utils.KEY_LENGTH, '0123456789abcdef')
Users.write(cr, SUPERUSER_ID, [user_id], {'openid_key': key})
# TODO fill empty fields with the ones from sreg/ax
cr.commit()
return login_and_redirect(dbname, login, key)
session['message'] = 'This OpenID identifier is not associated to any active users'
elif info.status == consumer.SETUP_NEEDED:
session['message'] = info.setup_url
elif info.status == consumer.FAILURE and display_identifier:
fmt = "Verification of %s failed: %s"
session['message'] = fmt % (display_identifier, info.message)
else: # FAILURE
# Either we don't understand the code or there is no
# openid_url included with the error. Give a generic
# failure message. The library should supply debug
# information in a log.
session['message'] = 'Verification failed.'
return set_cookie_and_redirect('/#action=login&loginerror=1')
@http.route('/auth_openid/login/status', type='json', auth='none')
def status(self):
session = getattr(request.session, 'openid_session', {})
return {'status': session.get('status'), 'message': session.get('message')}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
blackzw/openwrt_sdk_dev1
|
refs/heads/master
|
staging_dir/target-mips_r2_uClibc-0.9.33.2/usr/lib/python2.7/test/test_getargs.py
|
130
|
"""
Test the internal getargs.c implementation
PyArg_ParseTuple() is defined here.
The test here is not intended to test all of the module, just the
single case that failed between 2.1 and 2.2a2.
"""
# marshal.loads() uses PyArg_ParseTuple(args, "s#:loads")
# The s code will cause a Unicode conversion to occur. This test
# verify that the error is propagated properly from the C code back to
# Python.
import marshal
import unittest
from test import test_support
class GetArgsTest(unittest.TestCase):
# If the encoding succeeds using the current default encoding,
# this test will fail because it does not test the right part of the
# PyArg_ParseTuple() implementation.
def test_with_marshal(self):
arg = unicode(r'\222', 'unicode-escape')
self.assertRaises(UnicodeError, marshal.loads, arg)
def test_main():
test_support.run_unittest(GetArgsTest)
if __name__ == '__main__':
test_main()
|
nolanamy/zxing
|
refs/heads/master
|
cpp/scons/scons-local-2.0.0.final.0/SCons/Tool/sunar.py
|
34
|
"""engine.SCons.Tool.sunar
Tool-specific initialization for Solaris (Forte) ar (library archive). If CC
exists, static libraries should be built with it, so that template
instantiation can be resolved.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunar.py 5023 2010/06/14 22:05:46 scons"
import SCons.Defaults
import SCons.Tool
import SCons.Util
def generate(env):
"""Add Builders and construction variables for ar to an Environment."""
SCons.Tool.createStaticLibBuilder(env)
if env.Detect('CC'):
env['AR'] = 'CC'
env['ARFLAGS'] = SCons.Util.CLVar('-xar')
env['ARCOM'] = '$AR $ARFLAGS -o $TARGET $SOURCES'
else:
env['AR'] = 'ar'
env['ARFLAGS'] = SCons.Util.CLVar('r')
env['ARCOM'] = '$AR $ARFLAGS $TARGET $SOURCES'
env['SHLINK'] = '$LINK'
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -G')
env['SHLINKCOM'] = '$SHLINK $SHLINKFLAGS -o $TARGET $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
env['LIBPREFIX'] = 'lib'
env['LIBSUFFIX'] = '.a'
def exists(env):
return env.Detect('CC') or env.Detect('ar')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
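# A minimal SConstruct sketch, assuming SCons on a Solaris host with the Forte
# tools installed; the target and source names are invented. Requesting 'sunar'
# explicitly makes generate(env) above set AR/ARFLAGS/ARCOM.
env = Environment(tools=['default', 'sunar'])
env.StaticLibrary('example', ['example.cpp'])  # archived with 'CC -xar ...' when CC is detected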
|
gangadharkadam/smrterp
|
refs/heads/develop
|
erpnext/accounts/report/sales_invoice_trends/__init__.py
|
12133432
| |
namili/blueman
|
refs/heads/master
|
blueman/main/applet/__init__.py
|
12133432
| |
nikolas/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.3/tests/regressiontests/test_utils/__init__.py
|
12133432
| |
spaceone/pyjs
|
refs/heads/master
|
examples/_examples/__init__.py
|
12133432
| |
digimarc/django
|
refs/heads/master
|
django/contrib/sessions/management/commands/__init__.py
|
12133432
| |
memsharded/conan
|
refs/heads/develop
|
conans/test/functional/command/help_test.py
|
1
|
import sys
import unittest
from six import StringIO
from conans import __version__
from conans.test.utils.tools import TestClient
class BasicClientTest(unittest.TestCase):
def help_test(self):
client = TestClient()
client.run("")
self.assertIn('Conan commands. Type "conan <command> -h" for help', client.out)
client.run("--version")
self.assertIn("Conan version %s" % __version__, client.out)
client.run("some_unknown_command123", assert_error=True)
self.assertIn("ERROR: Unknown command 'some_unknown_command123'", client.out)
def help_cmd_test(self):
client = TestClient()
try:
old_stdout = sys.stdout
result = StringIO()
sys.stdout = result
client.run("help new")
finally:
sys.stdout = old_stdout
self.assertIn("Creates a new package recipe template with a 'conanfile.py'",
result.getvalue())
try:
old_stdout = sys.stdout
result = StringIO()
sys.stdout = result
client.run("help build")
finally:
sys.stdout = old_stdout
self.assertIn("Calls your local conanfile.py 'build()' method",
result.getvalue())
client.run("help")
self.assertIn("Creator commands",
client.out)
def help_cmd_error_test(self):
client = TestClient()
client.run("help not-exists", assert_error=True)
self.assertIn("ERROR: Unknown command 'not-exists'", client.out)
|
jeremymcrhat/Nexus_5X_kernel
|
refs/heads/8992_upstream_test_v2
|
scripts/gdb/linux/__init__.py
|
2010
|
# nothing to do for the initialization of this package
|
munhanha/mtg-random
|
refs/heads/master
|
django/test/client.py
|
73
|
import urllib
from urlparse import urlparse, urlunparse, urlsplit
import sys
import os
import re
import mimetypes
import warnings
from copy import copy
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.core.signals import got_request_exception
from django.http import SimpleCookie, HttpRequest, QueryDict
from django.template import TemplateDoesNotExist
from django.test import signals
from django.utils.functional import curry
from django.utils.encoding import smart_str
from django.utils.http import urlencode
from django.utils.importlib import import_module
from django.utils.itercompat import is_iterable
from django.db import transaction, close_connection
from django.test.utils import ContextList
__all__ = ('Client', 'RequestFactory', 'encode_file', 'encode_multipart')
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
CONTENT_TYPE_RE = re.compile('.*; charset=([\w\d-]+);?')
class FakePayload(object):
"""
A wrapper around StringIO that restricts what can be read since data from
    the network cannot be sought and cannot be read outside of its content
length. This makes sure that views can't do anything under the test client
that wouldn't work in Real Life.
"""
def __init__(self, content):
self.__content = StringIO(content)
self.__len = len(content)
def read(self, num_bytes=None):
if num_bytes is None:
num_bytes = self.__len or 0
assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
content = self.__content.read(num_bytes)
self.__len -= num_bytes
return content
class ClientHandler(BaseHandler):
"""
    An HTTP Handler that can be used for testing purposes.
Uses the WSGI interface to compose requests, but returns
the raw HttpResponse object
"""
def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
self.enforce_csrf_checks = enforce_csrf_checks
super(ClientHandler, self).__init__(*args, **kwargs)
def __call__(self, environ):
from django.conf import settings
from django.core import signals
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
self.load_middleware()
signals.request_started.send(sender=self.__class__)
try:
request = WSGIRequest(environ)
# sneaky little hack so that we can easily get round
# CsrfViewMiddleware. This makes life easier, and is probably
# required for backwards compatibility with external tests against
# admin views.
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
response = self.get_response(request)
finally:
signals.request_finished.disconnect(close_connection)
signals.request_finished.send(sender=self.__class__)
signals.request_finished.connect(close_connection)
return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
"""
Stores templates and contexts that are rendered.
The context is copied so that it is an accurate representation at the time
of rendering.
"""
store.setdefault('templates', []).append(template)
store.setdefault('context', ContextList()).append(copy(context))
def encode_multipart(boundary, data):
"""
Encodes multipart POST data from a dictionary of form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
"""
lines = []
to_str = lambda s: smart_str(s, settings.DEFAULT_CHARSET)
# Not by any means perfect, but good enough for our purposes.
is_file = lambda thing: hasattr(thing, "read") and callable(thing.read)
# Each bit of the multipart form data could be either a form value or a
# file, or a *list* of form values and/or files. Remember that HTTP field
# names can be duplicated!
for (key, value) in data.items():
if is_file(value):
lines.extend(encode_file(boundary, key, value))
elif not isinstance(value, basestring) and is_iterable(value):
for item in value:
if is_file(item):
lines.extend(encode_file(boundary, key, item))
else:
lines.extend([
'--' + boundary,
'Content-Disposition: form-data; name="%s"' % to_str(key),
'',
to_str(item)
])
else:
lines.extend([
'--' + boundary,
'Content-Disposition: form-data; name="%s"' % to_str(key),
'',
to_str(value)
])
lines.extend([
'--' + boundary + '--',
'',
])
return '\r\n'.join(lines)
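# Illustrative example of the payload produced for a single simple value
# (the lines below are joined with '\r\n'; BOUNDARY is defined above):
#
#   encode_multipart(BOUNDARY, {'name': 'value'})
#   # --BoUnDaRyStRiNg
#   # Content-Disposition: form-data; name="name"
#   #
#   # value
#   # --BoUnDaRyStRiNg--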
def encode_file(boundary, key, file):
to_str = lambda s: smart_str(s, settings.DEFAULT_CHARSET)
if hasattr(file, 'content_type'):
content_type = file.content_type
else:
content_type = mimetypes.guess_type(file.name)[0]
if content_type is None:
content_type = 'application/octet-stream'
return [
'--' + boundary,
'Content-Disposition: form-data; name="%s"; filename="%s"' \
% (to_str(key), to_str(os.path.basename(file.name))),
'Content-Type: %s' % content_type,
'',
file.read()
]
class RequestFactory(object):
"""
Class that lets you create mock Request objects for use in testing.
Usage:
rf = RequestFactory()
get_request = rf.get('/hello/')
post_request = rf.post('/submit/', {'foo': 'bar'})
Once you have a request object you can pass it to any view function,
just as if that view had been hooked up using a URLconf.
"""
def __init__(self, **defaults):
self.defaults = defaults
self.cookies = SimpleCookie()
self.errors = StringIO()
def _base_environ(self, **request):
"""
The base environment for a request.
"""
environ = {
'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
'PATH_INFO': '/',
'QUERY_STRING': '',
'REMOTE_ADDR': '127.0.0.1',
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'SERVER_NAME': 'testserver',
'SERVER_PORT': '80',
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.version': (1,0),
'wsgi.url_scheme': 'http',
'wsgi.errors': self.errors,
'wsgi.multiprocess': True,
'wsgi.multithread': False,
'wsgi.run_once': False,
}
environ.update(self.defaults)
environ.update(request)
return environ
def request(self, **request):
"Construct a generic request object."
return WSGIRequest(self._base_environ(**request))
def _get_path(self, parsed):
# If there are parameters, add them
if parsed[3]:
return urllib.unquote(parsed[2] + ";" + parsed[3])
else:
return urllib.unquote(parsed[2])
def get(self, path, data={}, **extra):
"Construct a GET request"
parsed = urlparse(path)
r = {
'CONTENT_TYPE': 'text/html; charset=utf-8',
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': urlencode(data, doseq=True) or parsed[4],
'REQUEST_METHOD': 'GET',
'wsgi.input': FakePayload('')
}
r.update(extra)
return self.request(**r)
def post(self, path, data={}, content_type=MULTIPART_CONTENT,
**extra):
"Construct a POST request."
if content_type is MULTIPART_CONTENT:
post_data = encode_multipart(BOUNDARY, data)
else:
# Encode the content so that the byte representation is correct.
match = CONTENT_TYPE_RE.match(content_type)
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
post_data = smart_str(data, encoding=charset)
parsed = urlparse(path)
r = {
'CONTENT_LENGTH': len(post_data),
'CONTENT_TYPE': content_type,
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': parsed[4],
'REQUEST_METHOD': 'POST',
'wsgi.input': FakePayload(post_data),
}
r.update(extra)
return self.request(**r)
def head(self, path, data={}, **extra):
"Construct a HEAD request."
parsed = urlparse(path)
r = {
'CONTENT_TYPE': 'text/html; charset=utf-8',
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': urlencode(data, doseq=True) or parsed[4],
'REQUEST_METHOD': 'HEAD',
'wsgi.input': FakePayload('')
}
r.update(extra)
return self.request(**r)
def options(self, path, data={}, **extra):
"Constrict an OPTIONS request"
parsed = urlparse(path)
r = {
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': urlencode(data, doseq=True) or parsed[4],
'REQUEST_METHOD': 'OPTIONS',
'wsgi.input': FakePayload('')
}
r.update(extra)
return self.request(**r)
def put(self, path, data={}, content_type=MULTIPART_CONTENT,
**extra):
"Construct a PUT request."
if content_type is MULTIPART_CONTENT:
post_data = encode_multipart(BOUNDARY, data)
else:
post_data = data
# Make `data` into a querystring only if it's not already a string. If
# it is a string, we'll assume that the caller has already encoded it.
query_string = None
if not isinstance(data, basestring):
query_string = urlencode(data, doseq=True)
parsed = urlparse(path)
r = {
'CONTENT_LENGTH': len(post_data),
'CONTENT_TYPE': content_type,
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': query_string or parsed[4],
'REQUEST_METHOD': 'PUT',
'wsgi.input': FakePayload(post_data),
}
r.update(extra)
return self.request(**r)
def delete(self, path, data={}, **extra):
"Construct a DELETE request."
parsed = urlparse(path)
r = {
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': urlencode(data, doseq=True) or parsed[4],
'REQUEST_METHOD': 'DELETE',
'wsgi.input': FakePayload('')
}
r.update(extra)
return self.request(**r)
class Client(RequestFactory):
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(self, enforce_csrf_checks=False, **defaults):
super(Client, self).__init__(**defaults)
self.handler = ClientHandler(enforce_csrf_checks)
self.exc_info = None
def store_exc_info(self, **kwargs):
"""
Stores exceptions when they are generated by a view.
"""
self.exc_info = sys.exc_info()
def _session(self):
"""
Obtains the current session variables.
"""
if 'django.contrib.sessions' in settings.INSTALLED_APPS:
engine = import_module(settings.SESSION_ENGINE)
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME, None)
if cookie:
return engine.SessionStore(cookie.value)
return {}
session = property(_session)
def request(self, **request):
"""
The master request method. Composes the environment dictionary
and passes to the handler, returning the result of the handler.
Assumes defaults for the query environment, which can be overridden
using the arguments to the request.
"""
environ = self._base_environ(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = curry(store_rendered_templates, data)
signals.template_rendered.connect(on_template_render, dispatch_uid="template-render")
# Capture exceptions created by the handler.
got_request_exception.connect(self.store_exc_info, dispatch_uid="request-exception")
try:
try:
response = self.handler(environ)
except TemplateDoesNotExist, e:
# If the view raises an exception, Django will attempt to show
# the 500.html template. If that template is not available,
# we should ignore the error in favor of re-raising the
# underlying exception that caused the 500 error. Any other
# template found to be missing during view error handling
# should be reported as-is.
if e.args != ('500.html',):
raise
# Look for a signalled exception, clear the current context
# exception data, then re-raise the signalled exception.
# Also make sure that the signalled exception is cleared from
# the local cache!
if self.exc_info:
exc_info = self.exc_info
self.exc_info = None
raise exc_info[1], None, exc_info[2]
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
# Flatten a single context. Not really necessary anymore thanks to
# the __getattr__ flattening in ContextList, but has some edge-case
# backwards-compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Provide a backwards-compatible (but pending deprecation) response.template
def _get_template(self):
warnings.warn("response.template is deprecated; use response.templates instead (which is always a list)",
PendingDeprecationWarning, stacklevel=2)
if not self.templates:
return None
elif len(self.templates) == 1:
return self.templates[0]
return self.templates
response.__class__.template = property(_get_template)
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
finally:
signals.template_rendered.disconnect(dispatch_uid="template-render")
got_request_exception.disconnect(dispatch_uid="request-exception")
def get(self, path, data={}, follow=False, **extra):
"""
Requests a response from the server using GET.
"""
response = super(Client, self).get(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def post(self, path, data={}, content_type=MULTIPART_CONTENT,
follow=False, **extra):
"""
Requests a response from the server using POST.
"""
response = super(Client, self).post(path, data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def head(self, path, data={}, follow=False, **extra):
"""
Request a response from the server using HEAD.
"""
response = super(Client, self).head(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def options(self, path, data={}, follow=False, **extra):
"""
Request a response from the server using OPTIONS.
"""
response = super(Client, self).options(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def put(self, path, data={}, content_type=MULTIPART_CONTENT,
follow=False, **extra):
"""
Send a resource to the server using PUT.
"""
response = super(Client, self).put(path, data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def delete(self, path, data={}, follow=False, **extra):
"""
Send a DELETE request to the server.
"""
response = super(Client, self).delete(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def login(self, **credentials):
"""
Sets the Factory to appear as if it has successfully logged into a site.
Returns True if login is possible; False if the provided credentials
are incorrect, or the user is inactive, or if the sessions framework is
not available.
"""
user = authenticate(**credentials)
if user and user.is_active \
and 'django.contrib.sessions' in settings.INSTALLED_APPS:
engine = import_module(settings.SESSION_ENGINE)
# Create a fake request to store login details.
request = HttpRequest()
if self.session:
request.session = self.session
else:
request.session = engine.SessionStore()
login(request, user)
# Save the session values.
request.session.save()
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
self.cookies[session_cookie].update(cookie_data)
return True
else:
return False
def logout(self):
"""
Removes the authenticated user's cookies and session object.
Causes the authenticated user to be logged out.
"""
session = import_module(settings.SESSION_ENGINE).SessionStore()
session_cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)
if session_cookie:
session.delete(session_key=session_cookie.value)
self.cookies = SimpleCookie()
def _handle_redirects(self, response, **extra):
"Follows any redirects by requesting responses from the server using GET."
response.redirect_chain = []
while response.status_code in (301, 302, 303, 307):
url = response['Location']
scheme, netloc, path, query, fragment = urlsplit(url)
redirect_chain = response.redirect_chain
redirect_chain.append((url, response.status_code))
if scheme:
extra['wsgi.url_scheme'] = scheme
# The test client doesn't handle external links,
# but since the situation is simulated in test_client,
# we fake things here by ignoring the netloc portion of the
# redirected URL.
response = self.get(path, QueryDict(query), follow=False, **extra)
response.redirect_chain = redirect_chain
# Prevent loops
if response.redirect_chain[-1] in response.redirect_chain[0:-1]:
break
return response
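# Illustrative use of the test client with redirect following (the URL is an
# example only):
#
#   c = Client()
#   response = c.get('/old-url/', follow=True)
#   # response.redirect_chain is a list of (url, status_code) pairs, one per hop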
|
samisalkosuo/clipasswordmgr
|
refs/heads/master
|
clipwdmgr/commands/ExitCommand.py
|
1
|
# -*- coding: utf-8 -*-
#The MIT License (MIT)
#
#Copyright (c) 2015,2018 Sami Salkosuo
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
#
#exit-command, does nothing except provide help text to the help-command and a cmd name
# for command completion
#
from ..utils.utils import *
from ..utils.functions import *
from .SuperCommand import *
from ..globals import *
from ..globals import GlobalVariables
class ExitCommand(SuperCommand):
def __init__(self,cmd_handler):
super().__init__(cmd_handler)
def parseCommandArgs(self,userInputList):
#implement in command class
#parse arguments like in this method
cmd_parser = ThrowingArgumentParser(prog="exit",description='Exit program.')
(self.cmd_args,self.help_text)=parseCommandArgs(cmd_parser,userInputList)
def execute(self):
#implement command here
print("Exit program.")
|
seanli9jan/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/partitioned_variables.py
|
4
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for creating partitioned variables.
This is a convenient abstraction to partition a large variable across
multiple smaller variables that can be assigned to different devices.
The full variable can be reconstructed by concatenating the smaller variables.
Using partitioned variables instead of a single variable is mostly a
performance choice. It however also has an impact on:
1. Random initialization, as the random number generator is called once per
slice
2. Updates, as they happen in parallel across slices
A key design goal is to allow a different graph to repartition a variable
with the same name but different slicings, including possibly no partitions.
TODO(touts): If an initializer provides a seed, the seed must be changed
deterministically for each slice, maybe by adding one to it, otherwise each
slice will use the same values. Maybe this can be done by passing the
slice offsets to the initializer functions.
Typical usage:
```python
# Create a list of partitioned variables with:
vs = create_partitioned_variables(
<shape>, <slicing>, <initializer>, name=<optional-name>)
# Pass the list as inputs to embedding_lookup for sharded, parallel lookup:
y = embedding_lookup(vs, ids, partition_strategy="div")
# Or fetch the variables in parallel to speed up large matmuls:
z = matmul(x, concat(slice_dim, vs))
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"create_partitioned_variables",
"variable_axis_size_partitioner",
"min_max_variable_partitioner",
"fixed_size_partitioner",
]
@tf_export("variable_axis_size_partitioner")
def variable_axis_size_partitioner(
max_shard_bytes, axis=0, bytes_per_string_element=16, max_shards=None):
"""Get a partitioner for VariableScope to keep shards below `max_shard_bytes`.
This partitioner will shard a Variable along one axis, attempting to keep
the maximum shard size below `max_shard_bytes`. In practice, this is not
always possible when sharding along only one axis. When this happens,
  the axis is sharded as much as possible (i.e., each slice along that axis
  becomes its own shard).
If the partitioner hits the `max_shards` limit, then each shard may end up
larger than `max_shard_bytes`. By default `max_shards` equals `None` and no
limit on the number of shards is enforced.
One reasonable value for `max_shard_bytes` is `(64 << 20) - 1`, or almost
`64MB`, to keep below the protobuf byte limit.
Args:
max_shard_bytes: The maximum size any given shard is allowed to be.
axis: The axis to partition along. Default: outermost axis.
bytes_per_string_element: If the `Variable` is of type string, this provides
an estimate of how large each scalar in the `Variable` is.
    max_shards: The maximum number of shards (an int) to create, taking
      precedence over `max_shard_bytes`.
Returns:
A partition function usable as the `partitioner` argument to
`variable_scope`, `get_variable`, and `get_partitioned_variable_list`.
Raises:
ValueError: If any of the byte counts are non-positive.
"""
if max_shard_bytes < 1 or bytes_per_string_element < 1:
raise ValueError(
"Both max_shard_bytes and bytes_per_string_element must be positive.")
if max_shards and max_shards < 1:
raise ValueError(
"max_shards must be positive.")
def _partitioner(shape, dtype):
"""Partitioner that partitions shards to have max_shard_bytes total size.
Args:
shape: A `TensorShape`.
dtype: A `DType`.
Returns:
A tuple representing how much to slice each axis in shape.
Raises:
ValueError: If shape is not a fully defined `TensorShape` or dtype is not
a `DType`.
"""
if not isinstance(shape, tensor_shape.TensorShape):
raise ValueError("shape is not a TensorShape: %s" % shape)
if not shape.is_fully_defined():
raise ValueError("shape is not fully defined: %s" % shape)
if not isinstance(dtype, dtypes.DType):
raise ValueError("dtype is not a DType: %s" % dtype)
if dtype.base_dtype == dtypes.string:
element_size = bytes_per_string_element
else:
element_size = dtype.size
partitions = [1] * shape.ndims
bytes_per_slice = 1.0 * (
shape.num_elements() / shape.dims[axis].value) * element_size
# How many slices can we fit on one shard of size at most max_shard_bytes?
# At least one slice is required.
slices_per_shard = max(1, math.floor(max_shard_bytes / bytes_per_slice))
# How many shards do we need for axis given that each shard fits
# slices_per_shard slices from a total of shape[axis] slices?
axis_shards = int(math.ceil(
1.0 * shape.dims[axis].value / slices_per_shard))
if max_shards:
axis_shards = min(max_shards, axis_shards)
partitions[axis] = axis_shards
return partitions
return _partitioner
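# Hand-worked example of the partitioner above (values chosen for illustration):
# for a float32 variable of shape [1024, 1024] and max_shard_bytes=1 << 20 (1MB),
#   bytes_per_slice  = (1024 * 1024 / 1024) * 4 = 4096
#   slices_per_shard = floor((1 << 20) / 4096)  = 256
#   axis_shards      = ceil(1024 / 256)         = 4
# so the returned partitioning is [4, 1], i.e. 4 shards along axis 0.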
@tf_export("min_max_variable_partitioner")
def min_max_variable_partitioner(max_partitions=1, axis=0,
min_slice_size=256 << 10,
bytes_per_string_element=16):
"""Partitioner to allocate minimum size per slice.
Returns a partitioner that partitions the variable of given shape and dtype
  such that each partition holds at least `min_slice_size` bytes of the
  variable. The maximum number of such partitions (upper bound) is given by
`max_partitions`.
Args:
max_partitions: Upper bound on the number of partitions. Defaults to 1.
axis: Axis along which to partition the variable. Defaults to 0.
min_slice_size: Minimum size of the variable slice per partition. Defaults
to 256K.
bytes_per_string_element: If the `Variable` is of type string, this provides
an estimate of how large each scalar in the `Variable` is.
Returns:
A partition function usable as the `partitioner` argument to
`variable_scope`, `get_variable`, and `get_partitioned_variable_list`.
"""
def _partitioner(shape, dtype):
"""Partitioner that partitions list for a variable of given shape and type.
Ex: Consider partitioning a variable of type float32 with
shape=[1024, 1024].
If `max_partitions` >= 16, this function would return
[(1024 * 1024 * 4) / (256 * 1024), 1] = [16, 1].
If `max_partitions` < 16, this function would return
[`max_partitions`, 1].
Args:
shape: Shape of the variable.
dtype: Type of the variable.
Returns:
List of partitions for each axis (currently only one axis can be
partitioned).
Raises:
ValueError: If axis to partition along does not exist for the variable.
"""
if axis >= len(shape):
raise ValueError("Can not partition variable along axis %d when shape is "
"only %s" % (axis, shape))
if dtype.base_dtype == dtypes.string:
bytes_per_element = bytes_per_string_element
else:
bytes_per_element = dtype.size
total_size_bytes = shape.num_elements() * bytes_per_element
partitions = total_size_bytes / min_slice_size
partitions_list = [1] * len(shape)
# We can not partition the variable beyond what its shape or
# `max_partitions` allows.
partitions_list[axis] = max(1, min(shape.dims[axis].value,
max_partitions,
int(math.ceil(partitions))))
return partitions_list
return _partitioner
@tf_export("fixed_size_partitioner")
def fixed_size_partitioner(num_shards, axis=0):
"""Partitioner to specify a fixed number of shards along given axis.
Args:
num_shards: `int`, number of shards to partition variable.
axis: `int`, axis to partition on.
Returns:
A partition function usable as the `partitioner` argument to
`variable_scope`, `get_variable`, and `get_partitioned_variable_list`.
"""
def _partitioner(shape, **unused_args):
partitions_list = [1] * len(shape)
partitions_list[axis] = min(num_shards, shape.dims[axis].value)
return partitions_list
return _partitioner
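# Illustrative use with the variable_scope API imported above (the scope and
# variable names are examples only):
#
#   with variable_scope.variable_scope(
#       "embeddings", partitioner=fixed_size_partitioner(4)):
#     emb = variable_scope.get_variable("emb", shape=[1000000, 128])
#   # "emb" is created as 4 shards split along axis 0.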
@tf_export("create_partitioned_variables")
def create_partitioned_variables(
shape, slicing, initializer, dtype=dtypes.float32,
trainable=True, collections=None, name=None, reuse=None):
"""Create a list of partitioned variables according to the given `slicing`.
Currently only one dimension of the full variable can be sliced, and the
full variable can be reconstructed by the concatenation of the returned
list along that dimension.
Args:
shape: List of integers. The shape of the full variable.
slicing: List of integers. How to partition the variable.
Must be of the same length as `shape`. Each value
      indicates how many slices to create in the corresponding
dimension. Presently only one of the values can be more than 1;
that is, the variable can only be sliced along one dimension.
      For convenience, the requested number of partitions does not have to
divide the corresponding dimension evenly. If it does not, the
shapes of the partitions are incremented by 1 starting from partition
0 until all slack is absorbed. The adjustment rules may change in the
future, but as you can save/restore these variables with different
      slicing specifications, this should not be a problem.
initializer: A `Tensor` of shape `shape` or a variable initializer
function. If a function, it will be called once for each slice,
passing the shape and data type of the slice as parameters. The
function must return a tensor with the same shape as the slice.
dtype: Type of the variables. Ignored if `initializer` is a `Tensor`.
trainable: If True also add all the variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES`.
collections: List of graph collections keys to add the variables to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
name: Optional name for the full variable. Defaults to
`"PartitionedVariable"` and gets uniquified automatically.
reuse: Boolean or `None`; if `True` and name is set, it would reuse
previously created variables. if `False` it will create new variables.
if `None`, it would inherit the parent scope reuse.
Returns:
A list of Variables corresponding to the slicing.
Raises:
ValueError: If any of the arguments is malformed.
"""
logging.warn(
"create_partitioned_variables is deprecated. Use "
"tf.get_variable with a partitioner set, or "
"tf.get_partitioned_variable_list, instead.")
if len(shape) != len(slicing):
raise ValueError("The 'shape' and 'slicing' of a partitioned Variable "
"must have the length: shape: %s, slicing: %s" %
(shape, slicing))
if len(shape) < 1:
raise ValueError("A partitioned Variable must have rank at least 1: "
"shape: %s" % shape)
# Legacy: we are provided the slicing directly, so just pass it to
# the partitioner.
partitioner = lambda **unused_kwargs: slicing
with variable_scope.variable_scope(
name, "PartitionedVariable", reuse=reuse):
# pylint: disable=protected-access
partitioned_var = variable_scope._get_partitioned_variable(
name=None,
shape=shape,
dtype=dtype,
initializer=initializer,
trainable=trainable,
partitioner=partitioner,
collections=collections)
return list(partitioned_var)
# pylint: enable=protected-access
|
trol73/avr-ic-tester-v2
|
refs/heads/master
|
python/device.py
|
1
|
# -*- coding: utf-8 -*-
import time
import serial
__author__ = 'Trol'
class Device:
CMD_SYNC = 1
CMD_ENTER_TEST_MODE = 2
CMD_TEST_INSTRUCTION = 3
CMD_READ_CHIP = 4
CMD_EXIT_TEST_MODE = 5
CMD_SEND_BYTE_TO_DISPLAY = 6
CMD_SEND_BYTES_TO_DISPLAY = 7
CMD_READ_BYTE_FROM_DISPLAY = 8
CMD_SET_DIRECTION = 9
CMD_GET_PIN = 10
CMD_SET_POWER = 11
CMD_POWER_OFF = 12
CMD_TEST_LOGIC = 13
CMD_TEST_RAM = 14
CMD_READ_ROM_START = 15
CMD_READ_ROM_NEXT = 16
CMD_GOTO_BOOTLOADER = 17
CMD_ABOUT = 18
CMD_CONFIGURE_POWER = 19
CMD_POWER_OUT_BYTE = 20
CMD_POWER_SET_ALL = 21
TEST_CMD_SET_40 = 8
TEST_CMD_SET_TO_0 = 43
TEST_CMD_SET_TO_1 = 44
def __init__(self, port_name, bauds):
self.serial = serial.serial_for_url(port_name, baudrate=bauds, timeout=0.5)
self.connected = False
def connect(self):
# self.connected = True
# try:
# if self.sync(123) == 123 and self.sync(0xAB) == 0xAB:
# self.connected = True
# return
# except:
# pass
# try:
# if self.sync(123) == 123 and self.sync(0xAB) == 0xAB:
# self.connected = True
# return
# except:
# pass
# try:
# if self.sync(123) == 123 and self.sync(0xAB) == 0xAB:
# self.connected = True
# return
# except:
# pass
#
# self.connected = False
try:
self._read()
except:
pass
for i in range(1, 10):
#print '?', i
try:
if self.sync(Device.CMD_SYNC) == i:
break
except:
print '.',
pass
for i in range(100, 200, 10):
try:
if self.sync(i) == i:
break
except:
print '.',
pass
print
self.connected = True
def close(self):
self.serial.close()
self.connected = False
def sync(self, v):
#print 'sync', v
self._cmd(Device.CMD_SYNC, v)
resp = self._read()
#print 'resp', resp
return resp
def enter_test_mode(self):
self._cmd(Device.CMD_ENTER_TEST_MODE, 13)
if self._read() != 0:
raise Exception("Can't enter remote mode")
def exit_test_mode(self):
self._cmd(Device.CMD_EXIT_TEST_MODE)
return self._read() == 0
def exec_test_instruction(self, *args):
self._cmd(Device.CMD_TEST_INSTRUCTION)
for a in args:
self._cmd(a)
return self._read() == 0
def set_pin_direction(self, pin40, out):
self._cmd(Device.CMD_SET_DIRECTION, pin40, 1 if out else 0)
# try:
# resp = self._read()
# print 'r1', resp
# except:
# print '.'
# try:
# resp = self._read()
# print 'r2', resp
# except:
# print '.'
# try:
# resp = self._read()
# print 'r3', resp
# except:
# print '.'
# if resp != 0:
# raise Exception('set_pi_direction() error, resp = ' + str(resp))
if self._read() != 0:
raise Exception("Can't set pin direction")
def power_out_byte(self, b):
self._cmd(Device.CMD_POWER_OUT_BYTE, b)
def power_set_all(self):
self._cmd(Device.CMD_POWER_SET_ALL)
def rom_read_start(self, rom_type):
self._cmd(Device.CMD_READ_ROM_START, rom_type)
def rom_read_next(self):
self._cmd(Device.CMD_READ_ROM_NEXT)
return self._read()
def set_pin_to(self, pin, val):
if type(val) is bool:
if val:
self.exec_test_instruction(Device.TEST_CMD_SET_TO_1, pin)
else:
self.exec_test_instruction(Device.TEST_CMD_SET_TO_0, pin)
elif type(val) is int:
if val == 1:
self.exec_test_instruction(Device.TEST_CMD_SET_TO_1, pin)
elif val == 0:
self.exec_test_instruction(Device.TEST_CMD_SET_TO_0, pin)
else:
raise Exception('wrong pin value ' + str(val))
else:
raise Exception('wrong pin value, bool or int expected')
def set_pins(self, to_0_list, to_1_list):
m0 = [0, 0, 0, 0, 0]
m1 = [0, 0, 0, 0, 0]
for i in range(0, 5):
for bit in range(0, 8):
pin = 8*i + bit
if pin in to_0_list:
m0[i] |= 1 << bit
if pin in to_1_list:
m1[i] |= 1 << bit
self.exec_test_instruction(Device.TEST_CMD_SET_40, m0, m1)
def get_pin(self, pin):
self._cmd(Device.CMD_GET_PIN, pin)
val = self._read()
#print 'get pin ', pin, ' -> ', val
return val
def set_power_plus(self, pin):
self._cmd(Device.CMD_SET_POWER, pin+1, 2)
resp = self._read()
if resp != 0:
raise Exception("Can't set_power_plus for " + str(pin) + " pin (response=" + str(resp) + ')')
def set_power_minus(self, pin):
self._cmd(Device.CMD_SET_POWER, pin+1, 1)
resp = self._read()
if resp != 0:
raise Exception("Can't set_power_minus for " + str(pin) + " pin (response=" + str(resp) + ')')
def disable_power(self):
self._cmd(Device.CMD_POWER_OFF)
def read_chip(self):
self._cmd(Device.CMD_READ_CHIP)
result = []
for i in range(0, 40):
v = self._read()
ddr = 'i' if (v & 4) == 0 else 'o'
port = 0 if (v & 2) == 0 else 1
pin = 0 if (v & 1) == 0 else 1
rec = {'ddr': ddr, 'port': port, 'pin': pin}
result.append(rec)
return result
def _cmd(self, *args):
if not self.connected:
raise Exception("Device doesn't connected")
# if self.prefix >= 0:
# self._write(self.prefix)
# self._write(len(args))
for a in args:
#print a
if type(a) is tuple:
for v in a:
self._write(v)
elif type(a) is str:
for c in a:
self._write(ord(c))
self._write(0)
elif type(a) is list:
for v in a:
self._write(v)
else:
self._write(a)
def _write(self, b):
# print '>> ', b
self.serial.write(chr(b))
def _read(self):
b = ord(self.serial.read())
# print '<< ', b
return b
#t = Device('com3', 57600)
#t.connect()
#print 10, t.sync(10)
#t.enter_test_mode()
# raw_input('power+')
# t.exec_test_instruction_2(41, 40) # POWER+ 40
# raw_input('power-')
# t.exec_test_instruction_2(42, 12) # POWER- 12
# raw_input('init')
# t.exec_test_instruction_3(1, 0, 0) # INIT 0, 0
# raw_input('power_connected')
# t.power_out_byte(0)
# t.power_out_byte(0xff)
# t.power_out_byte(0xff)
# t.power_set_all()
# t.rom_read_start(6) # 512
# t.rom_read_start(5) # 256
# t.rom_read_start(4) # 128
# t.rom_read_start(3) # 64
# t.rom_read_start(1) # 16
# t.rom_read_start(7) # 155RE3
# t.rom_read_start(8) # 556PT4
#t.rom_read_start(9) # 556PT5
#s = ''
# for i in range(0, 512):
# if (i % 16) == 0:
# print s
# s = ''
#
# s += chr(t.rom_read_next()).encode('hex') + ' '
# raw_input('power_connected')
#t.exit_test_mode()
# t.exit_test_mode()
|
Sage-Bionetworks/U4CChallenge
|
refs/heads/master
|
python/challenge_config.template.py
|
3
|
##-----------------------------------------------------------------------------
##
## challenge specific code and configuration
##
##-----------------------------------------------------------------------------
## A Synapse project will hold the assets for your challenge. Put its
## synapse ID here, for example
## CHALLENGE_SYN_ID = "syn1234567"
CHALLENGE_SYN_ID = ""
## Name of your challenge, defaults to the name of the challenge's project
CHALLENGE_NAME = ""
## Synapse user IDs of the challenge admins who will be notified by email
## about errors in the scoring script
ADMIN_USER_IDS = []
## Each question in your challenge should have an evaluation queue through
## which participants can submit their predictions or models. The queues
## should specify the challenge project as their content source. Queues
## can be created like so:
## evaluation = syn.store(Evaluation(
## name="My Challenge Q1",
## description="Predict all the things!",
## contentSource="syn1234567"))
## ...and found like this:
## evaluations = list(syn.getEvaluationByContentSource('syn3375314'))
## Configuring them here as a list will save a round-trip to the server
## every time the script starts.
evaluation_queues = []
evaluation_queue_by_id = {q['id']:q for q in evaluation_queues}
## define the default set of columns that will make up the leaderboard
LEADERBOARD_COLUMNS = [
dict(name='objectId', display_name='ID', columnType='STRING', maximumSize=20),
dict(name='userId', display_name='User', columnType='STRING', maximumSize=20, renderer='userid'),
dict(name='entityId', display_name='Entity', columnType='STRING', maximumSize=20, renderer='synapseid'),
dict(name='versionNumber', display_name='Version', columnType='INTEGER'),
dict(name='name', display_name='Name', columnType='STRING', maximumSize=240),
dict(name='team', display_name='Team', columnType='STRING', maximumSize=240)]
## Here we're adding columns for the output of our scoring functions, score,
## rmse and auc to the basic leaderboard information. In general, different
## questions would typically have different scoring metrics.
leaderboard_columns = {}
for q in evaluation_queues:
leaderboard_columns[q['id']] = LEADERBOARD_COLUMNS + [
dict(name='score', display_name='Score', columnType='DOUBLE'),
dict(name='rmse', display_name='RMSE', columnType='DOUBLE'),
dict(name='auc', display_name='AUC', columnType='DOUBLE')]
## map each evaluation queues to the synapse ID of a table object
## where the table holds a leaderboard for that question
leaderboard_tables = {}
def validate_submission(evaluation, submission):
"""
Find the right validation function and validate the submission.
:returns: (True, message) if validated, (False, message) if
validation fails or throws exception
"""
return True, "Looks OK to me!"
def score_submission(evaluation, submission):
"""
Find the right scoring function and score the submission
:returns: (score, message) where score is a dict of stats and message
is text for display to user
"""
import random
return (dict(score=random.random(), rmse=random.random(), auc=random.random()), "You did fine!")
|
agoose77/hivesystem
|
refs/heads/master
|
dragonfly/std/generator.py
|
1
|
import bee
from bee.segments import *
class generator(object):
metaguiparams = {"type": "type", "generator": "object"}
def __new__(cls, type, generator):
genfunc = generator
class generator(bee.worker):
gen = None
outp = output("pull", type)
v_outp = variable(type)
connect(v_outp, outp)
@modifier
def generate(self):
if self.gen is None:
self.gen = genfunc()
self.v_outp = next(self.gen)
pretrigger(v_outp, generate)
return generator
|
c-benko/HHG_Phasematching
|
refs/heads/master
|
test/i_scan.py
|
1
|
# i_scan.py
import sys, os
here = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.normpath(os.path.join(here, '../src')))
from phasematching import *
import matplotlib
from matplotlib import pyplot as plt
num = 100
iscan = np.linspace(0.1, 1.0, num)
harm = np.array(np.zeros(num))
Lcoh = np.array(np.zeros(num))
eta = np.array(np.zeros(num))
Labs = np.array(np.zeros(num))
Lmed = np.array(np.zeros(num))
for i in range(num):
sim = phase_matching('Xe', 17, iscan[i] , 120e-15, 17e-6, 1070e-9, .17, 0.10e-3, 100, 0, 'on')
harm[i], Lcoh[i], eta[i], Labs[i], Lmed[i] = sim.int_harmonic_yield()
fig, ax = plt.subplots(2, 2, figsize = (15,8))
matplotlib.rcParams.update({'font.size': 20})
ax[0,0].semilogy(iscan, harm, 'k-', linewidth = 2)
# ylim(.01,1)
ax[0,0].set_xlabel('Intensity [ x 10 ** 14 W cm ** -2]')
ax[0,0].set_ylabel('Harmonic Yield [arb.]')
ax[0,0].set_title(sim.Atom + ' at ' + str(sim.q) + ' Harmonic')
ax[0,0].grid()
# show()
ax[0,1].plot(iscan, Lcoh * 10 ** 3, 'k-', linewidth = 2, label = 'Coherence Length')
ax[0,1].plot(iscan, Lmed * 10 ** 3, 'r-', linewidth = 2, label = 'Medium Length')
ax[0,1].plot(iscan, Labs * 10 ** 3, 'b-', linewidth = 2, label = 'Absorption Length')
# ylim(.01,1)
ax[0,1].set_xlabel('Intensity [ x 10 ** 14 W cm ** -2]')
ax[0,1].set_ylabel('Length [m]')
ax[0,1].set_title(sim.Atom + ' at ' + str(sim.q) + ' Harmonic')
ax[0,1].legend(prop={'size':12})
ax[0,1].grid()
ax[1,0].plot(iscan, eta, 'm-', linewidth = 2, label = 'Ionization Fraction')
# ylim(.01,1)
ax[1,0].set_xlabel('Intensity [ x 10 ** 14 W cm ** -2]')
ax[1,0].set_ylabel('Ionization Fraction')
ax[1,0].set_title(sim.Atom + ' at ' + str(sim.q) + ' Harmonic')
ax[1,0].grid()
ax[1,1].plot(iscan, Lcoh / Labs, 'k-', linewidth = 2, label = 'L_coh / L_abs ( > 3)')
ax[1,1].plot(iscan, Lmed / Labs, 'r-', linewidth = 2, label = 'L_med / L_abs ( > 5)')
ax[1,1].set_ylim(0,20)
ax[1,1].set_xlabel('Intensity [ x 10 ** 14 W cm ** -2]')
ax[1,1].set_ylabel('Ratio')
ax[1,1].set_title(sim.Atom + ' at ' + str(sim.q) + ' Harmonic')
ax[1,1].legend(prop={'size':12})
ax[1,1].grid()
fig.set_tight_layout(True)
plt.show()
|
nevercast/home-assistant
|
refs/heads/dev
|
homeassistant/bootstrap.py
|
3
|
"""
homeassistant.bootstrap
~~~~~~~~~~~~~~~~~~~~~~~
Provides methods to bootstrap a home assistant instance.
Each method will return an initialized HomeAssistant instance.
After bootstrapping you can add your own components or
start Home Assistant by starting the returned instance.
"""
from collections import defaultdict
import logging
import logging.handlers
import os
import shutil
import sys
import homeassistant.core as core
import homeassistant.util.dt as date_util
import homeassistant.util.package as pkg_util
import homeassistant.util.location as loc_util
import homeassistant.config as config_util
import homeassistant.loader as loader
import homeassistant.components as core_components
import homeassistant.components.group as group
from homeassistant.helpers.entity import Entity
from homeassistant.const import (
__version__, EVENT_COMPONENT_LOADED, CONF_LATITUDE, CONF_LONGITUDE,
CONF_TEMPERATURE_UNIT, CONF_NAME, CONF_TIME_ZONE, CONF_CUSTOMIZE,
TEMP_CELCIUS, TEMP_FAHRENHEIT)
_LOGGER = logging.getLogger(__name__)
ATTR_COMPONENT = 'component'
PLATFORM_FORMAT = '{}.{}'
ERROR_LOG_FILENAME = 'home-assistant.log'
def setup_component(hass, domain, config=None):
""" Setup a component and all its dependencies. """
if domain in hass.config.components:
return True
_ensure_loader_prepared(hass)
if config is None:
config = defaultdict(dict)
components = loader.load_order_component(domain)
# OrderedSet is empty if component or dependencies could not be resolved
if not components:
return False
for component in components:
if not _setup_component(hass, component, config):
return False
return True
def _handle_requirements(hass, component, name):
""" Installs requirements for component. """
if hass.config.skip_pip or not hasattr(component, 'REQUIREMENTS'):
return True
for req in component.REQUIREMENTS:
if not pkg_util.install_package(req, target=hass.config.path('lib')):
_LOGGER.error('Not initializing %s because could not install '
'dependency %s', name, req)
return False
return True
def _setup_component(hass, domain, config):
""" Setup a component for Home Assistant. """
if domain in hass.config.components:
return True
component = loader.get_component(domain)
missing_deps = [dep for dep in getattr(component, 'DEPENDENCIES', [])
if dep not in hass.config.components]
if missing_deps:
_LOGGER.error(
'Not initializing %s because not all dependencies loaded: %s',
domain, ", ".join(missing_deps))
return False
if not _handle_requirements(hass, component, domain):
return False
try:
if not component.setup(hass, config):
_LOGGER.error('component %s failed to initialize', domain)
return False
except Exception: # pylint: disable=broad-except
_LOGGER.exception('Error during setup of component %s', domain)
return False
hass.config.components.append(component.DOMAIN)
# Assumption: if a component does not depend on groups
# it communicates with devices
if group.DOMAIN not in getattr(component, 'DEPENDENCIES', []):
hass.pool.add_worker()
hass.bus.fire(
EVENT_COMPONENT_LOADED, {ATTR_COMPONENT: component.DOMAIN})
return True
def prepare_setup_platform(hass, config, domain, platform_name):
""" Loads a platform and makes sure dependencies are setup. """
_ensure_loader_prepared(hass)
platform_path = PLATFORM_FORMAT.format(domain, platform_name)
platform = loader.get_component(platform_path)
# Not found
if platform is None:
_LOGGER.error('Unable to find platform %s', platform_path)
return None
# Already loaded
elif platform_path in hass.config.components:
return platform
# Load dependencies
for component in getattr(platform, 'DEPENDENCIES', []):
if not setup_component(hass, component, config):
_LOGGER.error(
'Unable to prepare setup for platform %s because '
'dependency %s could not be initialized', platform_path,
component)
return None
if not _handle_requirements(hass, platform, platform_path):
return None
return platform
def mount_local_lib_path(config_dir):
""" Add local library to Python Path """
sys.path.insert(0, os.path.join(config_dir, 'lib'))
# pylint: disable=too-many-branches, too-many-statements, too-many-arguments
def from_config_dict(config, hass=None, config_dir=None, enable_log=True,
verbose=False, daemon=False, skip_pip=False,
log_rotate_days=None):
"""
Tries to configure Home Assistant from a config dict.
Dynamically loads required components and its dependencies.
"""
if hass is None:
hass = core.HomeAssistant()
if config_dir is not None:
config_dir = os.path.abspath(config_dir)
hass.config.config_dir = config_dir
mount_local_lib_path(config_dir)
process_ha_config_upgrade(hass)
process_ha_core_config(hass, config.get(core.DOMAIN, {}))
if enable_log:
enable_logging(hass, verbose, daemon, log_rotate_days)
hass.config.skip_pip = skip_pip
if skip_pip:
_LOGGER.warning('Skipping pip installation of required modules. '
'This may cause issues.')
_ensure_loader_prepared(hass)
# Make a copy because we are mutating it.
# Convert it to defaultdict so components can always have config dict
# Convert values to dictionaries if they are None
config = defaultdict(
dict, {key: value or {} for key, value in config.items()})
# Filter out the repeating and common config section [homeassistant]
components = set(key.split(' ')[0] for key in config.keys()
if key != core.DOMAIN)
if not core_components.setup(hass, config):
_LOGGER.error('Home Assistant core failed to initialize. '
'Further initialization aborted.')
return hass
_LOGGER.info('Home Assistant core initialized')
# Setup the components
for domain in loader.load_order_components(components):
_setup_component(hass, domain, config)
return hass
def from_config_file(config_path, hass=None, verbose=False, daemon=False,
skip_pip=True, log_rotate_days=None):
"""
Reads the configuration file and tries to start all the required
functionality. Will add functionality to 'hass' parameter if given,
instantiates a new Home Assistant object if 'hass' is not given.
"""
if hass is None:
hass = core.HomeAssistant()
# Set config dir to directory holding config file
config_dir = os.path.abspath(os.path.dirname(config_path))
hass.config.config_dir = config_dir
mount_local_lib_path(config_dir)
enable_logging(hass, verbose, daemon, log_rotate_days)
config_dict = config_util.load_config_file(config_path)
return from_config_dict(config_dict, hass, enable_log=False,
skip_pip=skip_pip)
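# Minimal sketch of how this entry point is typically used (the config path is
# an example only; start()/block_till_done() are assumed from the core API):
#
#   hass = from_config_file('/home/user/.homeassistant/configuration.yaml')
#   hass.start()
#   hass.block_till_done()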
def enable_logging(hass, verbose=False, daemon=False, log_rotate_days=None):
""" Setup the logging for home assistant. """
if not daemon:
logging.basicConfig(level=logging.INFO)
fmt = ("%(log_color)s%(asctime)s %(levelname)s (%(threadName)s) "
"[%(name)s] %(message)s%(reset)s")
try:
from colorlog import ColoredFormatter
logging.getLogger().handlers[0].setFormatter(ColoredFormatter(
fmt,
datefmt='%y-%m-%d %H:%M:%S',
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
}
))
except ImportError:
_LOGGER.warning(
"Colorlog package not found, console coloring disabled")
# Log errors to a file if we have write access to file or config dir
err_log_path = hass.config.path(ERROR_LOG_FILENAME)
err_path_exists = os.path.isfile(err_log_path)
# Check if we can write to the error log if it exists or that
# we can create files in the containing directory if not.
if (err_path_exists and os.access(err_log_path, os.W_OK)) or \
(not err_path_exists and os.access(hass.config.config_dir, os.W_OK)):
if log_rotate_days:
err_handler = logging.handlers.TimedRotatingFileHandler(
err_log_path, when='midnight', backupCount=log_rotate_days)
else:
err_handler = logging.FileHandler(
err_log_path, mode='w', delay=True)
err_handler.setLevel(logging.INFO if verbose else logging.WARNING)
err_handler.setFormatter(
logging.Formatter('%(asctime)s %(name)s: %(message)s',
datefmt='%y-%m-%d %H:%M:%S'))
logger = logging.getLogger('')
logger.addHandler(err_handler)
logger.setLevel(logging.INFO)
else:
_LOGGER.error(
'Unable to setup error log %s (access denied)', err_log_path)
def process_ha_config_upgrade(hass):
""" Upgrade config if necessary. """
version_path = hass.config.path('.HA_VERSION')
try:
with open(version_path, 'rt') as inp:
conf_version = inp.readline().strip()
except FileNotFoundError:
# Last version to not have this file
conf_version = '0.7.7'
if conf_version == __version__:
return
_LOGGER.info('Upgrading config directory from %s to %s', conf_version,
__version__)
lib_path = hass.config.path('lib')
if os.path.isdir(lib_path):
shutil.rmtree(lib_path)
with open(version_path, 'wt') as outp:
outp.write(__version__)
def process_ha_core_config(hass, config):
""" Processes the [homeassistant] section from the config. """
hac = hass.config
def set_time_zone(time_zone_str):
""" Helper method to set time zone in HA. """
if time_zone_str is None:
return
time_zone = date_util.get_time_zone(time_zone_str)
if time_zone:
hac.time_zone = time_zone
date_util.set_default_time_zone(time_zone)
else:
_LOGGER.error('Received invalid time zone %s', time_zone_str)
for key, attr, typ in ((CONF_LATITUDE, 'latitude', float),
(CONF_LONGITUDE, 'longitude', float),
(CONF_NAME, 'location_name', str)):
if key in config:
try:
setattr(hac, attr, typ(config[key]))
except ValueError:
_LOGGER.error('Received invalid %s value for %s: %s',
typ.__name__, key, attr)
set_time_zone(config.get(CONF_TIME_ZONE))
customize = config.get(CONF_CUSTOMIZE)
if isinstance(customize, dict):
for entity_id, attrs in config.get(CONF_CUSTOMIZE, {}).items():
if not isinstance(attrs, dict):
continue
Entity.overwrite_attribute(entity_id, attrs.keys(), attrs.values())
if CONF_TEMPERATURE_UNIT in config:
unit = config[CONF_TEMPERATURE_UNIT]
if unit == 'C':
hac.temperature_unit = TEMP_CELCIUS
elif unit == 'F':
hac.temperature_unit = TEMP_FAHRENHEIT
# If we miss some of the needed values, auto detect them
if None not in (
hac.latitude, hac.longitude, hac.temperature_unit, hac.time_zone):
return
_LOGGER.info('Auto detecting location and temperature unit')
info = loc_util.detect_location_info()
if info is None:
_LOGGER.error('Could not detect location information')
return
if hac.latitude is None and hac.longitude is None:
hac.latitude = info.latitude
hac.longitude = info.longitude
if hac.temperature_unit is None:
if info.use_fahrenheit:
hac.temperature_unit = TEMP_FAHRENHEIT
else:
hac.temperature_unit = TEMP_CELCIUS
if hac.location_name is None:
hac.location_name = info.city
if hac.time_zone is None:
set_time_zone(info.time_zone)
def _ensure_loader_prepared(hass):
""" Ensure Home Assistant loader is prepared. """
if not loader.PREPARED:
loader.prepare(hass)
|
corredD/ePMV
|
refs/heads/master
|
cinema4d/c4dAdaptor.py
|
1
|
"""
Copyright (C) <2010> Autin L.
This file ePMV_git/cinema4d/c4dAdaptor.py is part of ePMV.
ePMV is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ePMV is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ePMV. If not, see <http://www.gnu.org/licenses/gpl-3.0.html>.
"""
#############################################################################
#
# Author: Ludovic Autin
#
# Copyright: Ludovic Autin TSRI 2010
#
#
#############################################################################
import upy
c4dHelper = upy.getHelperClass()
from ePMV.epmvAdaptor import epmvAdaptor
import c4d
from MolKit.protein import Protein,Chain
from Pmv.pmvPalettes import AtomElements
#from Pmv.pmvPalettes import DavidGoodsell, DavidGoodsellSortedKeys
#from Pmv.pmvPalettes import RasmolAmino, RasmolAminoSortedKeys
#from Pmv.pmvPalettes import Shapely
#from Pmv.pmvPalettes import SecondaryStructureType
class c4dAdaptor(epmvAdaptor):
"""
The specific adaptor for C4D R12.
from ePMV.cinema4d.c4dAdaptor import c4dAdaptor
epmv = c4dAdaptor(debug=1)
epmv.mv.readMolecule("/Users/ludo/blenderKTF/1CRN.pdb")
epmv.mv.computeMSMS("1CRN")
epmv.mv.computeMSMS("1CRN", log=1, display=True, perMol=1,surfName="MSMS-MOL1CRN")
epmv.mv.displayCPK("1CRN",log=1,negate=False,scaleFactor = 1.0)
...
"""
def __init__(self,gui=False,mv=None,debug=0):
self.helper = c4dHelper()
epmvAdaptor.__init__(self,mv,host='c4d',debug=debug)
self.MAX_LENGTH_NAME = 20
self.soft = 'c4d'
#scene and object helper function
# #scene and object helper function
self._getCurrentScene = self.helper.getCurrentScene
## self._addObjToGeom = self.helper.addObjToGeom
# self._host_update = self.helper.update
## self._parseObjectName = self.helper.parseObjectName
self._getObjectName = self.helper.getName
self._getObject = self.helper.getObject
self._addObjectToScene = self.helper.addObjectToScene
self._toggleDisplay = self.helper.toggleDisplay
self._newEmpty = self.helper.newEmpty
# self._deleteObject = self.helper.deleteObject
# #camera and lighting
self._addCameraToScene = self.helper.addCameraToScene
self._addLampToScene = self.helper.addLampToScene
# #display helper function
# self._editLines = self.helper.editLines
## self._createBaseSphere = self.helper.createBaseSphere
## self._instancesAtomsSphere = self.helper.instancesAtomsSphere
# self._Tube = self.helper.Tube
self._createsNmesh = self.helper.createsNmesh
self._PointCloudObject = self.helper.PointCloudObject
# #modify/update geom helper function
## self._updateSphereMesh = self.helper.updateSphereMesh
self._updateSphereObj = self.helper.updateSphereObj
# self._updateSphereObjs = self.helper.updateSphereObjs
# self._updateTubeMesh = self.helper.updateTubeMesh
self._updateTubeObj = self.helper.updateTubeObj
# self._updateMesh = self.helper.updateMesh
# #color helper function
# self._changeColor = self.helper.changeColor
# self._checkChangeMaterial = self.helper.checkChangeMaterial
# self._changeSticksColor = self.helper.changeSticksColor
# self._checkChangeStickMaterial = self.helper.checkChangeStickMaterial
#define the general function
self.use_progressBar = False
self.colorProxyObject = True
self._progressBar = self.helper.progressBar
self._resetProgressBar = self.helper.resetProgressBar
# self._render = self.helper.render
self.rep = self._getCurrentScene().GetDocumentName()
self.keywords["ribcolor"]={"name":"use vertex color for ribbon geometry","value":False,"type":"checkbox"}
# def _progressBar(self,progress,label):
# #the progessbar use the StatusSetBar
# c4d.StatusSetText(label)
# c4d.StatusSetBar(int(progress*100.))
#
# def _resetProgressBar(self,value):
# c4d.StatusClear()
def synchronize(self):
root = self.helper.getObject("ePMV")
if self.synchro_realtime and root is None :
root = self.helper.newEmpty("ePMV")
root.MakeTag(1027093)
self.helper.addObjectToScene(self.helper.getCurrentScene(),root)
def _makeRibbon(self,name,coords,shape=None,spline=None,parent=None):
sc=self._getCurrentScene()
if shape is None :
circle=self.helper.Circle(name+"circle",rad=0.5)
self._addObjectToScene(sc,circle)
if spline is None :
spline = self.helper.spline(name+"_spline",coords,scene=sc)
# self._addObjectToScene(sc,spline[0])
nurb=self.helper.sweepnurbs(name)
self._addObjectToScene(sc,nurb,parent=parent)
self.helper.reParent(spline[0],nurb)
self.helper.reParent(circle,nurb)
return nurb
def _createBaseSphere(self,name="",quality=0,cpkRad=0.,scale=1.,radius=1.,
parent=None,scene=None):
# name=mol.name+"_b_cpk",quality=opts[4],cpkRad=opts[3],
# scale=opts[2],parent=root
QualitySph={"0":15,"1":3,"2":6,"3":8,"4":16,"5":32}
segments = QualitySph[str(quality)]
iMe={}
doc = self.helper.getCurrentScene()
baseparent=self.helper.getObject(name)
if baseparent is None :
baseparent=self.helper.newEmpty(name)
self.helper.addObjectToScene(doc,baseparent,parent=parent)
self.helper.toggleDisplay(baseparent,False)
baseShape = self.helper.getObject(name+"_shape")
if baseShape is None :
baseShape = self.helper.newEmpty(name+"_shape")
self.helper.addObjectToScene(doc,baseShape,parent=baseparent)
basesphere=self.helper.getObject(name+"basesphere")
if basesphere is None :
meshsphere,basesphere=self.helper.Sphere(name+"basesphere",res=segments,
parent=baseShape)
# self.helper.toggleDisplay(basesphere,display=False)
for atn in list(self.AtmRadi.keys()):
#when we create we dont want to scale, just take the radius
rad=float(self.AtmRadi[atn])
atparent=self.helper.getObject(name+"_"+atn)
if atparent is None :
atparent=self.helper.newEmpty(name+"_"+atn)
iMe[atn]= self.helper.getMesh("mesh_"+name+'_'+atn)
if iMe[atn] is None :
# iMe[atn],ob=self.helper.Sphere(name+'_'+atn,
# res=segments,
# mat = atn)
if self.use_instances :
iMe[atn] = self.helper.setInstance("mesh_"+atn+"_"+name,baseShape)
else :
iMe[atn] = self.helper.newClone("mesh_"+atn+"_"+name,basesphere)
self.helper.scaleObj(iMe[atn],float(rad))
#iMe[atn]=c4d.BaseObject(c4d.Osphere)
self.helper.addObjectToScene(doc,atparent,parent=baseparent)
self.helper.addObjectToScene(doc,iMe[atn],parent=atparent)
if self.use_instances :
iMe[atn]=atparent
return iMe
def _changeColor(self,geom,colors,perVertex=True,proxyObject=True,pb=False):
if hasattr(geom,'mesh'):
if geom.name[:4] in ['Heli', 'Shee', 'Coil', 'Turn', 'Stra']:
proxyObject=False
objToColor = geom.mesh
elif hasattr(geom,"obj"):
objToColor = geom.obj
else :
objToColor = None
if hasattr(geom,"name") :
if geom.name[:4] in ['secondarystructure','Heli', 'Shee', 'Coil', 'Turn', 'Stra']:
proxyObject=False
if self.ribcolor :
proxyObject=True
if objToColor is None :
if type(geom) is str :
objToColor = self.helper.getObject(geom)
elif type(geom) is list :
objToColor = geom[0]
if type(geom) is str :
objToColor = self.helper.getObject(geom)
else :
objToColor = geom
else :
objToColor = geom
self.helper.changeColor(self.helper.getName(objToColor),colors,perVertex=perVertex,
proxyObject=proxyObject,pb=pb)
def _armature(self,name,atomset,coords=None,root=None,scn=None):
scn=self.helper.getCurrentScene()
names = None
if coords is not None :
c = coords
else :
c = atomset.coords
names = [x.full_name().replace(":","_") for x in atomset]
object,bones=self.helper.armature(name,c,listeName=names,
root=root,scn=scn)
return object,bones
def _updateArmature(self,name,atomset,coords=None,root=None,scn=None):
scn=self.helper.getCurrentScene()
names = None
if coords is not None :
c = coords
else :
c = atomset.coords
names = [x.full_name().replace(":","_") for x in atomset]
self.helper.updateArmature(name,c,listeName=names,
root=root,scn=scn)
def _metaballs(self,name,coords,radius,scn=None,root=None):
#by default we build the clouds metaballs...maybe could do on particle
basename = name.split("_")[0] #basename form : mol.name+"_metaball"
cloud = self.helper.getObject(name+"_cloud")
if cloud is None :
cloud = self.helper.PointCloudObject(name+"_cloud",
vertices=coords,
parent=None,atomarray=False)[0]
metab=self.helper.create_metaballs(name,sourceObj=cloud,parent=root,
coords=coords)
return [None,metab]
def _instancesAtomsSphere_full(self,name,x,iMe,scn,mat=None,scale=1.0,Res=32,
R=None,join=0,geom=None,dialog=None,pb=False):
#radius made via baseMesh...
#except for balls, need to scale?#by default : 0.3?
        if scn is None :
scn=self.helper.getCurrentScene()
sphers=[]
k=0
n='S'
nn='cpk'
if name.find('balls') != (-1) :
n='B'
nn='balls'
if geom is not None:
coords=geom.getVertices()
else :
coords=x.coords
hiera = 'default'
# parent=self.findatmParentHierarchie(x[0],n,hiera)
mol = x[0].getParentOfType(Protein)
if pb :
self.helper.resetProgressBar()
self.helper.progressBar(progress=0,label = "creating "+name)
for c in mol.chains:
spher=[]
oneparent = True
atoms = c.residues.atoms
parent=self.helper.getObject(mol.geomContainer.masterGeom.chains_obj[c.name+"_"+nn])
#print "finded",parent
for j in range(len(atoms.coords)):
#at=res.atoms[j]
at=atoms[j]
radius = at.radius
# scaleFactor=float(R)+float(radius)*float(scale)
atN=at.name
if atN[0] not in list(AtomElements.keys()) : atN="A"
fullname = self.atomNameRule(at,n)
#at.full_name().replace("'","b")+"n"+str(at.number)
#print fullname
atC=at.coords#at._coords[0]
spher.append( c4d.BaseObject(c4d.Oinstance) )
if atN[0] in iMe :
sm=iMe[atN[0]]
else :
sm=iMe["A"]
spher[j][1001]=sm
#spher[j][1001]=1
spher[j].SetName(fullname)#.replace(":","_")
sc = sm[905].x #radius of parent mesh
#we can compare to at.vwdRadius
if sc != radius :
print(("rad",sc,radius,at.name))
#if sc != scaleFactor :
# if n=='B' :
# scale = 1.
# spher[j][905]=c4d.Vector(float((1/sc)*scale),float((1/sc)*scale),float((1/sc)*scale)) #
spher[j].SetAbsPos(self.helper.FromVec(atC))
texture = spher[j].MakeTag(c4d.Ttexture)
mat = self.helper.getMaterial(atN[0])
if mat is None :
self.setupMaterials()
mat = self.helper.getMaterial(atN[0])
texture[1010] = mat
#p = self.findatmParentHierarchie(at,n,hiera)
#print "dinded",p
# if parent != p :
# cp = p
# oneparent = False
# parent = p
# else :
cp = parent
#print "parent",cp
self.helper.addObjectToScene(self.helper.getCurrentScene(),spher[j],parent=cp)
self.helper.toggleDisplay(spher[j],False)
k=k+1
if pb :
self.helper.progressBar(progress=j/len(coords))
#dialog.bc[c4d.gui.BFM_STATUSBAR_PROGRESS] = j/len(coords)
#dialog.bc[c4d.gui.BFM_STATUSBAR_PROGRESSFULLSIZE] = True
#c4d.StatusSetBar(j/len(coords))
self.helper.update()
sphers.extend(spher)
if pb :
self.helper.resetProgressBar(0)
return sphers
def createGUI(self):
self.gui = epmvGui(epmv=self,rep=self.rep)
def updateSphereObjs(self,g):
if not hasattr(g,'obj') : return
newcoords=g.getVertices()
#print "upadteObjSpheres"
[self.helper.updateObjectPos(g.obj[i],newcoords[i]) for i in range(len(g.obj))]
def updateMolAtomCoordBones(self,mol,index=-1):
        #problem, these are only CA
vt = []
bones = mol.geomContainer.geoms["armature"][1]
vt = [self.helper.ToVec(j.GetMg().off) for j in bones]
# for join in bones:
# pos=join.GetMg().off
# vt.append(self.helper.ToVec(pos))
print(vt[0])
return vt
def updateMolAtomCoordSpline(self,mol,index=-1):
        #problem, these are only CA
vts = []
# mesh = mol.geomContainer.geoms['lines'].obj
for ch in mol.chains:
name=mol.name+"_"+ch.name+"spline"
spline = self.helper.getCurrentScene().SearchObject(name)
points = spline.GetAllPoints()
matr= spline.GetMg()
vt=[self.helper.ToVec(x*matr) for x in points]
#vt = map(lambda x,m=matr: ToVec(x),points)
#vt = [self.helper.ToVec(x) for x in points]
vts.append(vt)
return vts
def updateMolAtomCoordLines(self,mol,index=-1):
#just need that cpk or the balls have been computed once..
        #balls and cpk should be linked to always have the same position
        # let balls be dependent on cpk => constraints? or update
        # the idea : spline/dynamic move linked to cpk which controls the balls
# this should be the actual coordinate of the ligand
# what about the rc...
vts = []
# mesh = mol.geomContainer.geoms['lines'].obj
for ch in mol.chains:
vt=[]
mesh = mol.geomContainer.geoms[ch.full_name()+'_line'][0]
            #check the deform mode before
deform = mesh.GetDeformMode()
meshcache = mesh.GetDeformCache()
if meshcache is not None:
meshcache = mesh.GetDeformCache()
points = meshcache.GetAllPoints()
vt=[self.helper.ToVec(x) for x in points]
else :
points = mesh.GetAllPoints()
matr= mesh.GetMg()
vt=[self.helper.ToVec(x*matr) for x in points]
vts.extend(vt)
#vt = map(lambda x,m=matr: ToVec(x*m),points)
#these are the cpk
if hasattr(mol.geomContainer.geoms['cpk'],'obj'):
sph = mol.geomContainer.geoms['cpk'].obj
#each have to be translate
[x.SetAbsPos(self.helper.FromVec(p)) for x,p in zip(sph,vts)]
#map(lambda x,p:x.SetAbsPos(p),sph,points)
masterCPK=sph[0].GetUp()
if meshcache is None:
masterCPK.SetMg(matr)
return vts#updateMolAtomCoordCPK(mol,index=index)
# def display_CPK(self,mol,sel,display,needRedraw=False,quality=0,cpkRad=0.0,scaleFactor=1.0,useTree="default",dialog=None):
# sc = self.getCurrentScene()
# g = mol.geomContainer.geoms['cpk']
# #print g
# #name=selection+"_cpk"
# #select=self.select(selection,negate=False, only=True, xor=False, log=0,
# # intersect=False)
# #print name,select
# #sel=select.findType(Atom)
# if not hasattr(g,"obj"): #if no mesh have to create it for evey atms
# name=mol.name+"_cpk"
# #print name
# mesh=createBaseSphere(name="base_cpk",quality=quality,cpkRad=cpkRad,
# scale=scaleFactor,parent=mol.geomContainer.masterGeom.obj)
# ob=instancesAtomsSphere(name,mol.allAtoms,mesh,sc,scale=scaleFactor,
# Res=quality,join=0,dialog=dialog)
# addObjToGeom([ob,mesh],g)
# for i,o in enumerate(ob):
# # if dialog != None :
# # dialog.bc[c4d.gui.BFM_STATUSBAR_PROGRESS] = j/len(coords)
# # #dialog.bc[c4d.gui.BFM_STATUSBAR_PROGRESSFULLSIZE] = True
# # dialog.set(dialog._progess,float(i/len(ob)))#dialog.bc)
# # getCurrentScene().Message(c4d.MULTIMSG_UP)
# # c4d.draw_views(c4d.DA_ONLY_ACTIVE_VIEW|c4d.DA_NO_THREAD|c4d.DA_NO_ANIMATION)
# parent=mol.geomContainer.masterGeom.obj
# hierarchy=parseObjectName(o)
# if hierarchy != "" :
# if useTree == 'perRes' :
# parent = getObject(mol.geomContainer.masterGeom.res_obj[hierarchy[2]])
# elif useTree == 'perAtom' :
# parent = getObject(o.GetName().split("_")[1])
# else :
# parent = getObject(mol.geomContainer.masterGeom.chains_obj[hierarchy[1]+"_cpk"])
# addObjectToScene(sc,o,parent=parent)
# toggleDisplay(o,False) #True per default
#
# #elif hasattr(g,"obj") and display :
# #updateSphereMesh(g,quality=quality,cpkRad=cpkRad,scale=scaleFactor)
# #if needRedraw : updateSphereObj(g)
# #if hasattr(g,"obj"):
# else :
# updateSphereMesh(g,quality=quality,cpkRad=cpkRad,scale=scaleFactor)
# atoms=sel#findType(Atom) already done
# for atms in atoms:
# nameo = "S_"+atms.full_name()
# o=getObject(nameo)#Blender.Object.Get (nameo)
# if o != None :
# toggleDisplay(o,display)
# if needRedraw : updateObjectPos(o,atms.coords)
def _editLines1(self,molecules,atomSets):
scn = self.helper.getCurrentScene()
sel=atomSets[0]
ch={}
mol = sel.top.uniq
for at in sel:
c = at.parent.parent
if c not in ch :
ch[c] = [[],[]]
ch[c][0].append(at.coords)
bonds= at.bonds
indices = [(x.atom1._bndIndex_,
x.atom2._bndIndex_) for x in bonds]
ch[c][1].extend(indices)
for c in ch :
parent = self.helper.getObject(c.full_name())
lines = self.helper.getObject(c.full_name()+'_line')
            if lines is None :
arr = c4d.BaseObject(c4d.Oatomarray)
arr.SetName(c.full_name()+'_lineds')
arr[1000] = 0.1 #radius cylinder
arr[1001] = 0.1 #radius sphere
arr[1002] = 3 #subdivision
self.helper.addObjectToScene(scn,arr,parent=parent)
lines = self.helper.createsNmesh(c.full_name()+'_line',
ch[c][0],
None,ch[c][1])
self.helper.addObjectToScene(scn,lines[0],parent=arr)
mol.geomContainer.geoms[c.full_name()+'_line'] = lines
else :
self.helper.updatePoly(lines,vertices=ch[c][0],faces=ch[c][1])
def _editLines(self,molecules,atomSets):
        print("editLines")
scn = self.helper.getCurrentScene()
sel=atomSets[0]
ch={}
mol = sel[0].getParentOfType(Protein)
v = mol.geomContainer.geoms["bonded"].getVertices()#not update ?
f = mol.geomContainer.geoms["bonded"].getFaces()
parent = self.helper.getObject(mol.full_name())
lines = self.helper.getObject(mol.full_name()+'_line')
arr = self.helper.getObject(mol.full_name()+'_lineds')
        if lines is None :
arr = c4d.BaseObject(c4d.Oatomarray)
arr.SetName(mol.full_name()+'_lineds')
arr[1000] = 0.1 #radius cylinder
arr[1001] = 0.1 #radius sphere
arr[1002] = 3 #subdivision
self.helper.addObjectToScene(scn,arr,parent=parent)
lines = self.helper.createsNmesh(mol.full_name()+'_line',
v,
None,f)
self.helper.addObjectToScene(scn,lines[0],parent=arr)
mol.geomContainer.geoms[mol.full_name()+'_line'] = lines
else :
            print("what")
# self.helper.updatePoly(lines,vertices=v,faces=f)
# self.helper.redoPoly(lines,v,f,parent=arr)
self.helper.updateMesh(lines,vertices=v,faces = f)
def _editLinesWorking(self,molecules,atomSets):
scn = self.helper.getCurrentScene()
for mol, atms, in map(None, molecules, atomSets):
#check if line exist
for ch in mol.chains:
parent = self.helper.getObject(ch.full_name())
lines = self.helper.getObject(ch.full_name()+'_line')
                if lines is None :
arr = c4d.BaseObject(c4d.Oatomarray)
arr.SetName(ch.full_name()+'_lineds')
arr[1000] = 0.1 #radius cylinder
arr[1001] = 0.1 #radius sphere
arr[1002] = 3 #subdivision
self.helper.addObjectToScene(scn,arr,parent=parent)
bonds, atnobnd = ch.residues.atoms.bonds
indices = [(x.atom1._bndIndex_,
x.atom2._bndIndex_) for x in bonds]
lines = self.helper.createsNmesh(ch.full_name()+'_line',
ch.residues.atoms.coords,
None,indices)
self.helper.addObjectToScene(scn,lines[0],parent=arr)
mol.geomContainer.geoms[ch.full_name()+'_line'] = lines
#display using AtomArray
else : #need to update
# self.helper._updateLines(lines, chains=ch)
self.helper.updatePoly(lines,vertices=ch.residues.atoms.coords)
|
tchernomax/ansible
|
refs/heads/devel
|
contrib/inventory/fleet.py
|
79
|
#!/usr/bin/env python
"""
fleetctl base external inventory script. Automatically finds the IPs of the booted coreos instances and
returns them under the host group 'coreos'
"""
# Copyright (C) 2014 Andrew Rothstein <andrew.rothstein at gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Thanks to the vagrant.py inventory script for giving me the basic structure
# of this.
#
import sys
import subprocess
import re
import string
from optparse import OptionParser
import json
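# Illustrative note (not part of the original script): with --list the output
# is a JSON group mapping, and with --host a small variable dict. The machine
# IPs below are placeholders.
#
#   $ ./fleet.py --list
#   {"coreos": ["172.17.8.101", "172.17.8.102"]}
#
#   $ ./fleet.py --host 172.17.8.101
#   {"Host": "172.17.8.101", "ansible_ssh_user": "core",
#    "ansible_python_interpreter": "/opt/bin/python"}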
# Options
# ------------------------------
parser = OptionParser(usage="%prog [options] --list | --host <machine>")
parser.add_option('--list', default=False, dest="list", action="store_true",
help="Produce a JSON consumable grouping of servers in your fleet")
parser.add_option('--host', default=None, dest="host",
help="Generate additional host specific details for given host for Ansible")
(options, args) = parser.parse_args()
#
# helper functions
#
def get_ssh_config():
configs = []
for box in list_running_boxes():
config = get_a_ssh_config(box)
configs.append(config)
return configs
# list all the running instances in the fleet
def list_running_boxes():
boxes = []
for line in subprocess.check_output(["fleetctl", "list-machines"]).split('\n'):
matcher = re.search(r"[^\s]+[\s]+([^\s]+).+", line)
if matcher and matcher.group(1) != "IP":
boxes.append(matcher.group(1))
return boxes
def get_a_ssh_config(box_name):
config = {}
config['Host'] = box_name
config['ansible_ssh_user'] = 'core'
config['ansible_python_interpreter'] = '/opt/bin/python'
return config
# List out servers that fleetctl has running
# ------------------------------
if options.list:
ssh_config = get_ssh_config()
hosts = {'coreos': []}
for data in ssh_config:
hosts['coreos'].append(data['Host'])
print(json.dumps(hosts))
sys.exit(1)
# Get out the host details
# ------------------------------
elif options.host:
result = {}
ssh_config = get_ssh_config()
    details = list(filter(lambda x: (x['Host'] == options.host), ssh_config))
if len(details) > 0:
# pass through the port, in case it's non standard.
result = details[0]
print(json.dumps(result))
sys.exit(1)
# Print out help
# ------------------------------
else:
parser.print_help()
sys.exit(1)
|
yograterol/django
|
refs/heads/master
|
tests/field_deconstruction/tests.py
|
69
|
from __future__ import unicode_literals
from django.db import models
from django.test import SimpleTestCase, override_settings
from django.utils import six
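# Illustrative sketch (not part of the original test suite): deconstruct()
# returns a (name, path, args, kwargs) tuple with enough information to
# rebuild an equivalent field, which is what the migrations serializer relies
# on. Assuming a configured settings module:
#
#   from django.utils.module_loading import import_string
#   field = models.SlugField(max_length=100)
#   name, path, args, kwargs = field.deconstruct()
#   rebuilt = import_string(path)(*args, **kwargs)
#   assert rebuilt.max_length == 100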
class FieldDeconstructionTests(SimpleTestCase):
"""
Tests the deconstruct() method on all core fields.
"""
def test_name(self):
"""
Tests the outputting of the correct name if assigned one.
"""
# First try using a "normal" field
field = models.CharField(max_length=65)
name, path, args, kwargs = field.deconstruct()
self.assertIsNone(name)
field.set_attributes_from_name("is_awesome_test")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(name, "is_awesome_test")
self.assertIsInstance(name, six.text_type)
# Now try with a ForeignKey
field = models.ForeignKey("some_fake.ModelName", models.CASCADE)
name, path, args, kwargs = field.deconstruct()
self.assertIsNone(name)
field.set_attributes_from_name("author")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(name, "author")
def test_auto_field(self):
field = models.AutoField(primary_key=True)
field.set_attributes_from_name("id")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.AutoField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"primary_key": True})
def test_big_integer_field(self):
field = models.BigIntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.BigIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_boolean_field(self):
field = models.BooleanField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.BooleanField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.BooleanField(default=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.BooleanField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"default": True})
def test_char_field(self):
field = models.CharField(max_length=65)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.CharField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 65})
field = models.CharField(max_length=65, null=True, blank=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.CharField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 65, "null": True, "blank": True})
def test_char_field_choices(self):
field = models.CharField(max_length=1, choices=(("A", "One"), ("B", "Two")))
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.CharField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"choices": [("A", "One"), ("B", "Two")], "max_length": 1})
def test_csi_field(self):
field = models.CommaSeparatedIntegerField(max_length=100)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.CommaSeparatedIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 100})
def test_date_field(self):
field = models.DateField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.DateField(auto_now=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"auto_now": True})
def test_datetime_field(self):
field = models.DateTimeField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateTimeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.DateTimeField(auto_now_add=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateTimeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"auto_now_add": True})
# Bug #21785
field = models.DateTimeField(auto_now=True, auto_now_add=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateTimeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"auto_now_add": True, "auto_now": True})
def test_decimal_field(self):
field = models.DecimalField(max_digits=5, decimal_places=2)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DecimalField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_digits": 5, "decimal_places": 2})
def test_decimal_field_0_decimal_places(self):
"""
A DecimalField with decimal_places=0 should work (#22272).
"""
field = models.DecimalField(max_digits=5, decimal_places=0)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DecimalField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_digits": 5, "decimal_places": 0})
def test_email_field(self):
field = models.EmailField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.EmailField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 254})
field = models.EmailField(max_length=255)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.EmailField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 255})
def test_file_field(self):
field = models.FileField(upload_to="foo/bar")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FileField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"upload_to": "foo/bar"})
# Test max_length
field = models.FileField(upload_to="foo/bar", max_length=200)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FileField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"upload_to": "foo/bar", "max_length": 200})
def test_file_path_field(self):
field = models.FilePathField(match=".*\.txt$")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FilePathField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"match": ".*\.txt$"})
field = models.FilePathField(recursive=True, allow_folders=True, max_length=123)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FilePathField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"recursive": True, "allow_folders": True, "max_length": 123})
def test_float_field(self):
field = models.FloatField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FloatField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_foreign_key(self):
# Test basic pointing
from django.contrib.auth.models import Permission
field = models.ForeignKey("auth.Permission", models.CASCADE)
field.remote_field.model = Permission
field.remote_field.field_name = "id"
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "on_delete": models.CASCADE})
self.assertFalse(hasattr(kwargs['to'], "setting_name"))
# Test swap detection for swappable model
field = models.ForeignKey("auth.User", models.CASCADE)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.User", "on_delete": models.CASCADE})
self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
# Test nonexistent (for now) model
field = models.ForeignKey("something.Else", models.CASCADE)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "something.Else", "on_delete": models.CASCADE})
# Test on_delete
field = models.ForeignKey("auth.User", models.SET_NULL)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.User", "on_delete": models.SET_NULL})
# Test to_field preservation
field = models.ForeignKey("auth.Permission", models.CASCADE, to_field="foobar")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "to_field": "foobar", "on_delete": models.CASCADE})
# Test related_name preservation
field = models.ForeignKey("auth.Permission", models.CASCADE, related_name="foobar")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "related_name": "foobar", "on_delete": models.CASCADE})
@override_settings(AUTH_USER_MODEL="auth.Permission")
def test_foreign_key_swapped(self):
# It doesn't matter that we swapped out user for permission;
# there's no validation. We just want to check the setting stuff works.
field = models.ForeignKey("auth.Permission", models.CASCADE)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "on_delete": models.CASCADE})
self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
def test_image_field(self):
field = models.ImageField(upload_to="foo/barness", width_field="width", height_field="height")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ImageField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"upload_to": "foo/barness", "width_field": "width", "height_field": "height"})
def test_integer_field(self):
field = models.IntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.IntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_ip_address_field(self):
field = models.IPAddressField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.IPAddressField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_generic_ip_address_field(self):
field = models.GenericIPAddressField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.GenericIPAddressField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.GenericIPAddressField(protocol="IPv6")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.GenericIPAddressField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"protocol": "IPv6"})
def test_many_to_many_field(self):
# Test normal
field = models.ManyToManyField("auth.Permission")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission"})
self.assertFalse(hasattr(kwargs['to'], "setting_name"))
# Test swappable
field = models.ManyToManyField("auth.User")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.User"})
self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
# Test through
field = models.ManyToManyField("auth.Permission", through="auth.Group")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "through": "auth.Group"})
# Test custom db_table
field = models.ManyToManyField("auth.Permission", db_table="custom_table")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "db_table": "custom_table"})
# Test related_name
field = models.ManyToManyField("auth.Permission", related_name="custom_table")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "related_name": "custom_table"})
@override_settings(AUTH_USER_MODEL="auth.Permission")
def test_many_to_many_field_swapped(self):
# It doesn't matter that we swapped out user for permission;
# there's no validation. We just want to check the setting stuff works.
field = models.ManyToManyField("auth.Permission")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission"})
self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
def test_null_boolean_field(self):
field = models.NullBooleanField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.NullBooleanField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_positive_integer_field(self):
field = models.PositiveIntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.PositiveIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_positive_small_integer_field(self):
field = models.PositiveSmallIntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.PositiveSmallIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_slug_field(self):
field = models.SlugField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.SlugField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.SlugField(db_index=False, max_length=231)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.SlugField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"db_index": False, "max_length": 231})
def test_small_integer_field(self):
field = models.SmallIntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.SmallIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_text_field(self):
field = models.TextField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.TextField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_time_field(self):
field = models.TimeField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.TimeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.TimeField(auto_now=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(args, [])
self.assertEqual(kwargs, {'auto_now': True})
field = models.TimeField(auto_now_add=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(args, [])
self.assertEqual(kwargs, {'auto_now_add': True})
def test_url_field(self):
field = models.URLField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.URLField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.URLField(max_length=231)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.URLField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 231})
def test_binary_field(self):
field = models.BinaryField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.BinaryField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
|
glenngillen/dotfiles
|
refs/heads/master
|
.vscode/extensions/ms-python.python-2021.5.842923320/pythonFiles/lib/python/debugpy/_vendored/pydevd/_pydev_imps/_pydev_saved_modules.py
|
2
|
import sys
IS_PY2 = sys.version_info < (3,)
import threading
import time
import socket
import select
if IS_PY2:
import thread
import Queue as _queue
import xmlrpclib
import SimpleXMLRPCServer as _pydev_SimpleXMLRPCServer
import BaseHTTPServer
else:
import _thread as thread
import queue as _queue
import xmlrpc.client as xmlrpclib
import xmlrpc.server as _pydev_SimpleXMLRPCServer
import http.server as BaseHTTPServer
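# Usage sketch (illustrative, not part of the original module): other pydevd
# modules import these saved references instead of importing the stdlib names
# directly, e.g.:
#
#   from _pydev_imps._pydev_saved_modules import thread, _queue
#   q = _queue.Queue()
#   thread.start_new_thread(q.put, (42,))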
|
pixelated/pixelated-user-agent
|
refs/heads/master
|
service/pixelated/resources/mail_resource.py
|
2
|
#
# Copyright (c) 2015 ThoughtWorks, Inc.
#
# Pixelated is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pixelated is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Pixelated. If not, see <http://www.gnu.org/licenses/>.
import json
from twisted.python.log import err
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET
from pixelated.resources import respond_json_deferred, BaseResource, handle_error_deferred
from pixelated.support import replier
class MailTags(Resource):
isLeaf = True
def __init__(self, mail_id, mail_service):
Resource.__init__(self)
self._mail_service = mail_service
self._mail_id = mail_id
def render_POST(self, request):
new_tags = json.loads(request.content.read()).get('newtags')
d = self._mail_service.update_tags(self._mail_id, new_tags)
d.addCallback(lambda mail: respond_json_deferred(mail.as_dict(), request))
def handle403(failure):
failure.trap(ValueError)
return respond_json_deferred(failure.getErrorMessage(), request, 403)
d.addErrback(handle403)
return NOT_DONE_YET
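# Request sketch (illustrative; the resource prefix, mail id and tag names are
# placeholders, not taken from this module):
#
#   POST .../<mail_id>/tags   (wherever MailResource is mounted)
#   {"newtags": ["inbox", "important"]}
#
# On success the updated mail is returned as JSON; if update_tags raises
# ValueError the handler responds with HTTP 403 and the error message.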
class Mail(Resource):
def __init__(self, mail_id, mail_service):
Resource.__init__(self)
self.putChild('tags', MailTags(mail_id, mail_service))
self._mail_id = mail_id
self._mail_service = mail_service
def render_GET(self, request):
def populate_reply(mail):
mail_dict = mail.as_dict()
current_user = self._mail_service.account_email
sender = mail.headers.get('Reply-to', mail.headers.get('From'))
to = mail.headers.get('To', [])
ccs = mail.headers.get('Cc', [])
mail_dict['replying'] = replier.generate_recipients(sender, to, ccs, current_user)
return mail_dict
d = self._mail_service.mail(self._mail_id)
d.addCallback(lambda mail: populate_reply(mail))
d.addCallback(lambda mail_dict: respond_json_deferred(mail_dict, request))
d.addErrback(handle_error_deferred, request)
return NOT_DONE_YET
def render_DELETE(self, request):
def response_failed(failure):
err(failure, 'something failed')
request.finish()
d = self._mail_service.delete_mail(self._mail_id)
d.addCallback(lambda _: respond_json_deferred(None, request))
d.addErrback(response_failed)
return NOT_DONE_YET
class MailResource(BaseResource):
def __init__(self, services_factory):
BaseResource.__init__(self, services_factory)
def getChild(self, mail_id, request):
_mail_service = self.mail_service(request)
return Mail(mail_id, _mail_service)
|
israelbenatar/boto
|
refs/heads/develop
|
boto/rds/__init__.py
|
126
|
# Copyright (c) 2009-2012 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import urllib
from boto.connection import AWSQueryConnection
from boto.rds.dbinstance import DBInstance
from boto.rds.dbsecuritygroup import DBSecurityGroup
from boto.rds.optiongroup import OptionGroup, OptionGroupOption
from boto.rds.parametergroup import ParameterGroup
from boto.rds.dbsnapshot import DBSnapshot
from boto.rds.event import Event
from boto.rds.regioninfo import RDSRegionInfo
from boto.rds.dbsubnetgroup import DBSubnetGroup
from boto.rds.vpcsecuritygroupmembership import VPCSecurityGroupMembership
from boto.regioninfo import get_regions
from boto.rds.logfile import LogFile, LogFileObject
def regions():
"""
Get all available regions for the RDS service.
:rtype: list
:return: A list of :class:`boto.rds.regioninfo.RDSRegionInfo`
"""
return get_regions(
'rds',
region_cls=RDSRegionInfo,
connection_cls=RDSConnection
)
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.rds.RDSConnection`.
Any additional parameters after the region_name are passed on to
the connect method of the region object.
:type: str
:param region_name: The name of the region to connect to.
:rtype: :class:`boto.rds.RDSConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
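# Usage sketch (illustrative only; the region name and credentials are
# placeholders, not taken from this module):
#
#   import boto.rds
#   conn = boto.rds.connect_to_region(
#       'us-west-2',
#       aws_access_key_id='<access key>',
#       aws_secret_access_key='<secret key>')
#   for db in conn.get_all_dbinstances():
#       print(db.id, db.status)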
#boto.set_stream_logger('rds')
class RDSConnection(AWSQueryConnection):
DefaultRegionName = 'us-east-1'
DefaultRegionEndpoint = 'rds.amazonaws.com'
APIVersion = '2013-05-15'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
security_token=None, validate_certs=True,
profile_name=None):
if not region:
region = RDSRegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
super(RDSConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
security_token,
validate_certs=validate_certs,
profile_name=profile_name)
def _required_auth_capability(self):
return ['hmac-v4']
# DB Instance methods
def get_all_dbinstances(self, instance_id=None, max_records=None,
marker=None):
"""
Retrieve all the DBInstances in your account.
:type instance_id: str
:param instance_id: DB Instance identifier. If supplied, only
                            information about this instance will be returned.
Otherwise, info about all DB Instances will
be returned.
:type max_records: int
:param max_records: The maximum number of records to be returned.
If more results are available, a MoreToken will
be returned in the response that can be used to
retrieve additional records. Default is 100.
:type marker: str
:param marker: The marker provided by a previous request.
:rtype: list
:return: A list of :class:`boto.rds.dbinstance.DBInstance`
"""
params = {}
if instance_id:
params['DBInstanceIdentifier'] = instance_id
if max_records:
params['MaxRecords'] = max_records
if marker:
params['Marker'] = marker
return self.get_list('DescribeDBInstances', params,
[('DBInstance', DBInstance)])
def create_dbinstance(self,
id,
allocated_storage,
instance_class,
master_username,
master_password,
port=3306,
engine='MySQL5.1',
db_name=None,
param_group=None,
security_groups=None,
availability_zone=None,
preferred_maintenance_window=None,
backup_retention_period=None,
preferred_backup_window=None,
multi_az=False,
engine_version=None,
auto_minor_version_upgrade=True,
character_set_name = None,
db_subnet_group_name = None,
license_model = None,
option_group_name = None,
iops=None,
vpc_security_groups=None,
):
# API version: 2013-09-09
# Parameter notes:
# =================
# id should be db_instance_identifier according to API docs but has been left
# id for backwards compatibility
#
# security_groups should be db_security_groups according to API docs but has been left
# security_groups for backwards compatibility
#
# master_password should be master_user_password according to API docs but has been left
# master_password for backwards compatibility
#
# instance_class should be db_instance_class according to API docs but has been left
# instance_class for backwards compatibility
"""
Create a new DBInstance.
:type id: str
:param id: Unique identifier for the new instance.
Must contain 1-63 alphanumeric characters.
First character must be a letter.
May not end with a hyphen or contain two consecutive hyphens
:type allocated_storage: int
:param allocated_storage: Initially allocated storage size, in GBs.
Valid values are depending on the engine value.
* MySQL = 5--3072
* oracle-se1 = 10--3072
* oracle-se = 10--3072
* oracle-ee = 10--3072
* sqlserver-ee = 200--1024
* sqlserver-se = 200--1024
* sqlserver-ex = 30--1024
* sqlserver-web = 30--1024
* postgres = 5--3072
:type instance_class: str
:param instance_class: The compute and memory capacity of
the DBInstance. Valid values are:
* db.t1.micro
* db.m1.small
* db.m1.medium
* db.m1.large
* db.m1.xlarge
* db.m2.xlarge
* db.m2.2xlarge
* db.m2.4xlarge
:type engine: str
:param engine: Name of database engine. Defaults to MySQL but can be;
* MySQL
* oracle-se1
* oracle-se
* oracle-ee
* sqlserver-ee
* sqlserver-se
* sqlserver-ex
* sqlserver-web
* postgres
:type master_username: str
:param master_username: Name of master user for the DBInstance.
* MySQL must be;
- 1--16 alphanumeric characters
- first character must be a letter
- cannot be a reserved MySQL word
* Oracle must be:
- 1--30 alphanumeric characters
- first character must be a letter
- cannot be a reserved Oracle word
* SQL Server must be:
- 1--128 alphanumeric characters
- first character must be a letter
              - cannot be a reserved SQL Server word
:type master_password: str
:param master_password: Password of master user for the DBInstance.
* MySQL must be 8--41 alphanumeric characters
* Oracle must be 8--30 alphanumeric characters
* SQL Server must be 8--128 alphanumeric characters.
:type port: int
:param port: Port number on which database accepts connections.
Valid values [1115-65535].
* MySQL defaults to 3306
* Oracle defaults to 1521
* SQL Server defaults to 1433 and _cannot_ be 1434, 3389,
47001, 49152, and 49152 through 49156.
* PostgreSQL defaults to 5432
:type db_name: str
:param db_name: * MySQL:
Name of a database to create when the DBInstance
is created. Default is to create no databases.
Must contain 1--64 alphanumeric characters and cannot
be a reserved MySQL word.
* Oracle:
The Oracle System ID (SID) of the created DB instances.
Default is ORCL. Cannot be longer than 8 characters.
* SQL Server:
Not applicable and must be None.
* PostgreSQL:
Name of a database to create when the DBInstance
is created. Default is to create no databases.
Must contain 1--63 alphanumeric characters. Must
begin with a letter or an underscore. Subsequent
characters can be letters, underscores, or digits (0-9)
and cannot be a reserved PostgreSQL word.
:type param_group: str or ParameterGroup object
:param param_group: Name of DBParameterGroup or ParameterGroup instance
to associate with this DBInstance. If no groups are
specified no parameter groups will be used.
:type security_groups: list of str or list of DBSecurityGroup objects
:param security_groups: List of names of DBSecurityGroup to
authorize on this DBInstance.
:type availability_zone: str
:param availability_zone: Name of the availability zone to place
DBInstance into.
:type preferred_maintenance_window: str
:param preferred_maintenance_window: The weekly time range (in UTC)
during which maintenance can occur.
Default is Sun:05:00-Sun:09:00
:type backup_retention_period: int
:param backup_retention_period: The number of days for which automated
backups are retained. Setting this to
zero disables automated backups.
:type preferred_backup_window: str
:param preferred_backup_window: The daily time range during which
automated backups are created (if
                                        enabled). Must be in hh24:mi-hh24:mi
format (UTC).
:type multi_az: bool
:param multi_az: If True, specifies the DB Instance will be
deployed in multiple availability zones.
For Microsoft SQL Server, must be set to false. You cannot set
the AvailabilityZone parameter if the MultiAZ parameter is
set to true.
:type engine_version: str
:param engine_version: The version number of the database engine to use.
* MySQL format example: 5.1.42
* Oracle format example: 11.2.0.2.v2
* SQL Server format example: 10.50.2789.0.v1
* PostgreSQL format example: 9.3
:type auto_minor_version_upgrade: bool
:param auto_minor_version_upgrade: Indicates that minor engine
upgrades will be applied
automatically to the Read Replica
during the maintenance window.
Default is True.
:type character_set_name: str
:param character_set_name: For supported engines, indicates that the DB Instance
should be associated with the specified CharacterSet.
:type db_subnet_group_name: str
:param db_subnet_group_name: A DB Subnet Group to associate with this DB Instance.
If there is no DB Subnet Group, then it is a non-VPC DB
instance.
:type license_model: str
:param license_model: License model information for this DB Instance.
Valid values are;
- license-included
- bring-your-own-license
- general-public-license
                              Not all license types are supported on all engines.
:type option_group_name: str
:param option_group_name: Indicates that the DB Instance should be associated
with the specified option group.
:type iops: int
        :param iops: The amount of IOPS (input/output operations per second) to provision
for the DB Instance. Can be modified at a later date.
                     Must scale linearly. For every 1000 IOPS provisioned, you must allocate
100 GB of storage space. This scales up to 1 TB / 10 000 IOPS for MySQL
and Oracle. MSSQL is limited to 700 GB / 7 000 IOPS.
If you specify a value, it must be at least 1000 IOPS and you must
allocate 100 GB of storage.
:type vpc_security_groups: list of str or a VPCSecurityGroupMembership object
:param vpc_security_groups: List of VPC security group ids or a list of
VPCSecurityGroupMembership objects this DBInstance should be a member of
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The new db instance.
"""
# boto argument alignment with AWS API parameter names:
# =====================================================
# arg => AWS parameter
# allocated_storage => AllocatedStorage
# auto_minor_version_update => AutoMinorVersionUpgrade
# availability_zone => AvailabilityZone
# backup_retention_period => BackupRetentionPeriod
# character_set_name => CharacterSetName
# db_instance_class => DBInstanceClass
# db_instance_identifier => DBInstanceIdentifier
# db_name => DBName
# db_parameter_group_name => DBParameterGroupName
# db_security_groups => DBSecurityGroups.member.N
# db_subnet_group_name => DBSubnetGroupName
# engine => Engine
# engine_version => EngineVersion
# license_model => LicenseModel
# master_username => MasterUsername
# master_user_password => MasterUserPassword
# multi_az => MultiAZ
# option_group_name => OptionGroupName
# port => Port
# preferred_backup_window => PreferredBackupWindow
# preferred_maintenance_window => PreferredMaintenanceWindow
# vpc_security_groups => VpcSecurityGroupIds.member.N
params = {
'AllocatedStorage': allocated_storage,
'AutoMinorVersionUpgrade': str(auto_minor_version_upgrade).lower() if auto_minor_version_upgrade else None,
'AvailabilityZone': availability_zone,
'BackupRetentionPeriod': backup_retention_period,
'CharacterSetName': character_set_name,
'DBInstanceClass': instance_class,
'DBInstanceIdentifier': id,
'DBName': db_name,
'DBParameterGroupName': (param_group.name
if isinstance(param_group, ParameterGroup)
else param_group),
'DBSubnetGroupName': db_subnet_group_name,
'Engine': engine,
'EngineVersion': engine_version,
'Iops': iops,
'LicenseModel': license_model,
'MasterUsername': master_username,
'MasterUserPassword': master_password,
'MultiAZ': str(multi_az).lower() if multi_az else None,
'OptionGroupName': option_group_name,
'Port': port,
'PreferredBackupWindow': preferred_backup_window,
'PreferredMaintenanceWindow': preferred_maintenance_window,
}
if security_groups:
l = []
for group in security_groups:
if isinstance(group, DBSecurityGroup):
l.append(group.name)
else:
l.append(group)
self.build_list_params(params, l, 'DBSecurityGroups.member')
if vpc_security_groups:
l = []
for vpc_grp in vpc_security_groups:
if isinstance(vpc_grp, VPCSecurityGroupMembership):
l.append(vpc_grp.vpc_group)
else:
l.append(vpc_grp)
self.build_list_params(params, l, 'VpcSecurityGroupIds.member')
# Remove any params set to None
for k, v in params.items():
if v is None: del(params[k])
return self.get_object('CreateDBInstance', params, DBInstance)
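    # Illustrative call (identifiers and credentials are placeholders, not
    # taken from the boto docs): creating a small MySQL instance with the
    # connection object returned by connect_to_region:
    #
    #   db = conn.create_dbinstance(
    #       id='mydb-instance',
    #       allocated_storage=10,
    #       instance_class='db.m1.small',
    #       master_username='admin',
    #       master_password='<password>',
    #       db_name='mydb')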
def create_dbinstance_read_replica(self, id, source_id,
instance_class=None,
port=3306,
availability_zone=None,
auto_minor_version_upgrade=None):
"""
Create a new DBInstance Read Replica.
:type id: str
:param id: Unique identifier for the new instance.
Must contain 1-63 alphanumeric characters.
First character must be a letter.
May not end with a hyphen or contain two consecutive hyphens
:type source_id: str
:param source_id: Unique identifier for the DB Instance for which this
DB Instance will act as a Read Replica.
:type instance_class: str
:param instance_class: The compute and memory capacity of the
DBInstance. Default is to inherit from
the source DB Instance.
Valid values are:
* db.m1.small
* db.m1.large
* db.m1.xlarge
* db.m2.xlarge
* db.m2.2xlarge
* db.m2.4xlarge
:type port: int
:param port: Port number on which database accepts connections.
Default is to inherit from source DB Instance.
Valid values [1115-65535]. Defaults to 3306.
:type availability_zone: str
:param availability_zone: Name of the availability zone to place
DBInstance into.
:type auto_minor_version_upgrade: bool
:param auto_minor_version_upgrade: Indicates that minor engine
upgrades will be applied
automatically to the Read Replica
during the maintenance window.
Default is to inherit this value
from the source DB Instance.
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The new db instance.
"""
params = {'DBInstanceIdentifier': id,
'SourceDBInstanceIdentifier': source_id}
if instance_class:
params['DBInstanceClass'] = instance_class
if port:
params['Port'] = port
if availability_zone:
params['AvailabilityZone'] = availability_zone
if auto_minor_version_upgrade is not None:
if auto_minor_version_upgrade is True:
params['AutoMinorVersionUpgrade'] = 'true'
else:
params['AutoMinorVersionUpgrade'] = 'false'
return self.get_object('CreateDBInstanceReadReplica',
params, DBInstance)
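    # Illustrative call (identifiers are placeholders): a replica inherits the
    # instance class and port from the source instance unless overridden:
    #
    #   replica = conn.create_dbinstance_read_replica(
    #       'mydb-replica', 'mydb-instance',
    #       instance_class='db.m1.small')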
def promote_read_replica(self, id,
backup_retention_period=None,
preferred_backup_window=None):
"""
Promote a Read Replica to a standalone DB Instance.
:type id: str
:param id: Unique identifier for the new instance.
Must contain 1-63 alphanumeric characters.
First character must be a letter.
May not end with a hyphen or contain two consecutive hyphens
:type backup_retention_period: int
:param backup_retention_period: The number of days for which automated
backups are retained. Setting this to
zero disables automated backups.
:type preferred_backup_window: str
:param preferred_backup_window: The daily time range during which
automated backups are created (if
                                        enabled). Must be in hh24:mi-hh24:mi
format (UTC).
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The new db instance.
"""
params = {'DBInstanceIdentifier': id}
if backup_retention_period is not None:
params['BackupRetentionPeriod'] = backup_retention_period
if preferred_backup_window:
params['PreferredBackupWindow'] = preferred_backup_window
return self.get_object('PromoteReadReplica', params, DBInstance)
def modify_dbinstance(self, id, param_group=None, security_groups=None,
preferred_maintenance_window=None,
master_password=None, allocated_storage=None,
instance_class=None,
backup_retention_period=None,
preferred_backup_window=None,
multi_az=False,
apply_immediately=False,
iops=None,
vpc_security_groups=None,
new_instance_id=None,
):
"""
Modify an existing DBInstance.
:type id: str
:param id: Unique identifier for the new instance.
:type param_group: str or ParameterGroup object
:param param_group: Name of DBParameterGroup or ParameterGroup instance
to associate with this DBInstance. If no groups are
specified no parameter groups will be used.
:type security_groups: list of str or list of DBSecurityGroup objects
:param security_groups: List of names of DBSecurityGroup to authorize on
this DBInstance.
:type preferred_maintenance_window: str
:param preferred_maintenance_window: The weekly time range (in UTC)
during which maintenance can
occur.
Default is Sun:05:00-Sun:09:00
:type master_password: str
:param master_password: Password of master user for the DBInstance.
Must be 4-15 alphanumeric characters.
:type allocated_storage: int
:param allocated_storage: The new allocated storage size, in GBs.
Valid values are [5-1024]
:type instance_class: str
:param instance_class: The compute and memory capacity of the
DBInstance. Changes will be applied at
next maintenance window unless
apply_immediately is True.
Valid values are:
* db.m1.small
* db.m1.large
* db.m1.xlarge
* db.m2.xlarge
* db.m2.2xlarge
* db.m2.4xlarge
:type apply_immediately: bool
:param apply_immediately: If true, the modifications will be applied
as soon as possible rather than waiting for
the next preferred maintenance window.
:type backup_retention_period: int
:param backup_retention_period: The number of days for which automated
backups are retained. Setting this to
zero disables automated backups.
:type preferred_backup_window: str
:param preferred_backup_window: The daily time range during which
automated backups are created (if
                                        enabled). Must be in hh24:mi-hh24:mi
format (UTC).
:type multi_az: bool
:param multi_az: If True, specifies the DB Instance will be
deployed in multiple availability zones.
:type iops: int
        :param iops: The amount of IOPS (input/output operations per second) to provision
for the DB Instance. Can be modified at a later date.
                     Must scale linearly. For every 1000 IOPS provisioned, you must allocate
100 GB of storage space. This scales up to 1 TB / 10 000 IOPS for MySQL
and Oracle. MSSQL is limited to 700 GB / 7 000 IOPS.
If you specify a value, it must be at least 1000 IOPS and you must
allocate 100 GB of storage.
:type vpc_security_groups: list of str or a VPCSecurityGroupMembership object
:param vpc_security_groups: List of VPC security group ids or a
VPCSecurityGroupMembership object this DBInstance should be a member of
:type new_instance_id: str
:param new_instance_id: New name to rename the DBInstance to.
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The modified db instance.
"""
params = {'DBInstanceIdentifier': id}
if param_group:
params['DBParameterGroupName'] = (param_group.name
if isinstance(param_group, ParameterGroup)
else param_group)
if security_groups:
l = []
for group in security_groups:
if isinstance(group, DBSecurityGroup):
l.append(group.name)
else:
l.append(group)
self.build_list_params(params, l, 'DBSecurityGroups.member')
if vpc_security_groups:
l = []
for vpc_grp in vpc_security_groups:
if isinstance(vpc_grp, VPCSecurityGroupMembership):
l.append(vpc_grp.vpc_group)
else:
l.append(vpc_grp)
self.build_list_params(params, l, 'VpcSecurityGroupIds.member')
if preferred_maintenance_window:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if master_password:
params['MasterUserPassword'] = master_password
if allocated_storage:
params['AllocatedStorage'] = allocated_storage
if instance_class:
params['DBInstanceClass'] = instance_class
if backup_retention_period is not None:
params['BackupRetentionPeriod'] = backup_retention_period
if preferred_backup_window:
params['PreferredBackupWindow'] = preferred_backup_window
if multi_az:
params['MultiAZ'] = 'true'
if apply_immediately:
params['ApplyImmediately'] = 'true'
if iops:
params['Iops'] = iops
if new_instance_id:
params['NewDBInstanceIdentifier'] = new_instance_id
return self.get_object('ModifyDBInstance', params, DBInstance)
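# Illustrative usage sketch (not part of the original module; the connection,
# identifier and sizes below are assumed for the example):
#
#     import boto.rds
#     conn = boto.rds.connect_to_region('us-east-1')
#     conn.modify_dbinstance('mydbinstance', allocated_storage=100,
#                            instance_class='db.m1.large',
#                            apply_immediately=True)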
def delete_dbinstance(self, id, skip_final_snapshot=False,
final_snapshot_id=''):
"""
Delete an existing DBInstance.
:type id: str
:param id: Unique identifier of the DBInstance to be deleted.
:type skip_final_snapshot: bool
:param skip_final_snapshot: This parameter determines whether a final
db snapshot is created before the instance
is deleted. If True, no snapshot
is created. If False, a snapshot
is created before deleting the instance.
:type final_snapshot_id: str
:param final_snapshot_id: If a final snapshot is requested, this
is the identifier used for that snapshot.
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The deleted db instance.
"""
params = {'DBInstanceIdentifier': id}
if skip_final_snapshot:
params['SkipFinalSnapshot'] = 'true'
else:
params['SkipFinalSnapshot'] = 'false'
params['FinalDBSnapshotIdentifier'] = final_snapshot_id
return self.get_object('DeleteDBInstance', params, DBInstance)
def reboot_dbinstance(self, id):
"""
Reboot DBInstance.
:type id: str
:param id: Unique identifier of the instance.
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The rebooting db instance.
"""
params = {'DBInstanceIdentifier': id}
return self.get_object('RebootDBInstance', params, DBInstance)
# DBParameterGroup methods
def get_all_dbparameter_groups(self, groupname=None, max_records=None,
marker=None):
"""
Get all parameter groups associated with your account in a region.
:type groupname: str
:param groupname: The name of the DBParameter group to retrieve.
If not provided, all DBParameter groups will be returned.
:type max_records: int
:param max_records: The maximum number of records to be returned.
If more results are available, a MoreToken will
be returned in the response that can be used to
retrieve additional records. Default is 100.
:type marker: str
:param marker: The marker provided by a previous request.
:rtype: list
:return: A list of :class:`boto.ec2.parametergroup.ParameterGroup`
"""
params = {}
if groupname:
params['DBParameterGroupName'] = groupname
if max_records:
params['MaxRecords'] = max_records
if marker:
params['Marker'] = marker
return self.get_list('DescribeDBParameterGroups', params,
[('DBParameterGroup', ParameterGroup)])
def get_all_dbparameters(self, groupname, source=None,
max_records=None, marker=None):
"""
Get all parameters associated with a ParameterGroup
:type groupname: str
:param groupname: The name of the DBParameter group to retrieve.
:type source: str
:param source: Specifies which parameters to return.
If not specified, all parameters will be returned.
Valid values are: user|system|engine-default
:type max_records: int
:param max_records: The maximum number of records to be returned.
If more results are available, a MoreToken will
be returned in the response that can be used to
retrieve additional records. Default is 100.
:type marker: str
:param marker: The marker provided by a previous request.
:rtype: :class:`boto.ec2.parametergroup.ParameterGroup`
:return: The ParameterGroup
"""
params = {'DBParameterGroupName': groupname}
if source:
params['Source'] = source
if max_records:
params['MaxRecords'] = max_records
if marker:
params['Marker'] = marker
pg = self.get_object('DescribeDBParameters', params, ParameterGroup)
pg.name = groupname
return pg
def create_parameter_group(self, name, engine='MySQL5.1', description=''):
"""
Create a new dbparameter group for your account.
:type name: string
:param name: The name of the new dbparameter group
:type engine: str
:param engine: Name of database engine.
:type description: string
:param description: The description of the new dbparameter group
:rtype: :class:`boto.rds.parametergroup.ParameterGroup`
:return: The newly created ParameterGroup
"""
params = {'DBParameterGroupName': name,
'DBParameterGroupFamily': engine,
'Description': description}
return self.get_object('CreateDBParameterGroup', params, ParameterGroup)
def modify_parameter_group(self, name, parameters=None):
"""
Modify a ParameterGroup for your account.
:type name: string
:param name: The name of the ParameterGroup to modify
:type parameters: list of :class:`boto.rds.parametergroup.Parameter`
:param parameters: The new parameters
:rtype: :class:`boto.rds.parametergroup.ParameterGroup`
:return: The modified ParameterGroup
"""
params = {'DBParameterGroupName': name}
for i in range(0, len(parameters)):
parameter = parameters[i]
parameter.merge(params, i+1)
return self.get_list('ModifyDBParameterGroup', params,
ParameterGroup, verb='POST')
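# Illustrative sketch of the usual read-modify-write cycle for DB parameter
# groups (group/parameter names are assumed, conn is an assumed RDSConnection,
# and the dict-style Parameter access follows the boto RDS tutorial pattern):
#
#     pg = conn.get_all_dbparameters('my-param-group')
#     pg['max_connections'].value = 250
#     pg['max_connections'].apply(True)
#     conn.modify_parameter_group(pg.name, [pg['max_connections']])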
def reset_parameter_group(self, name, reset_all_params=False,
parameters=None):
"""
Resets some or all of the parameters of a ParameterGroup to the
default value
:type name: string
:param name: The name of the ParameterGroup to reset
:type parameters: list of :class:`boto.rds.parametergroup.Parameter`
:param parameters: The parameters to reset. If not supplied,
all parameters will be reset.
"""
params = {'DBParameterGroupName': name}
if reset_all_params:
params['ResetAllParameters'] = 'true'
else:
params['ResetAllParameters'] = 'false'
for i in range(0, len(parameters or [])):
parameter = parameters[i]
parameter.merge(params, i+1)
return self.get_status('ResetDBParameterGroup', params)
def delete_parameter_group(self, name):
"""
Delete a ParameterGroup from your account.
:type name: string
:param name: The name of the ParameterGroup to delete
"""
params = {'DBParameterGroupName': name}
return self.get_status('DeleteDBParameterGroup', params)
# DBSecurityGroup methods
def get_all_dbsecurity_groups(self, groupname=None, max_records=None,
marker=None):
"""
Get all security groups associated with your account in a region.
:type groupname: str
:param groupname: The name of the security group to retrieve.
If not provided, all security groups will
be returned.
:type max_records: int
:param max_records: The maximum number of records to be returned.
If more results are available, a MoreToken will
be returned in the response that can be used to
retrieve additional records. Default is 100.
:type marker: str
:param marker: The marker provided by a previous request.
:rtype: list
:return: A list of :class:`boto.rds.dbsecuritygroup.DBSecurityGroup`
"""
params = {}
if groupname:
params['DBSecurityGroupName'] = groupname
if max_records:
params['MaxRecords'] = max_records
if marker:
params['Marker'] = marker
return self.get_list('DescribeDBSecurityGroups', params,
[('DBSecurityGroup', DBSecurityGroup)])
def create_dbsecurity_group(self, name, description=None):
"""
Create a new security group for your account.
This will create the security group within the region you
are currently connected to.
:type name: string
:param name: The name of the new security group
:type description: string
:param description: The description of the new security group
:rtype: :class:`boto.rds.dbsecuritygroup.DBSecurityGroup`
:return: The newly created DBSecurityGroup
"""
params = {'DBSecurityGroupName': name}
if description:
params['DBSecurityGroupDescription'] = description
group = self.get_object('CreateDBSecurityGroup', params,
DBSecurityGroup)
group.name = name
group.description = description
return group
def delete_dbsecurity_group(self, name):
"""
Delete a DBSecurityGroup from your account.
:type name: string
:param name: The name of the DBSecurityGroup to delete
"""
params = {'DBSecurityGroupName': name}
return self.get_status('DeleteDBSecurityGroup', params)
def authorize_dbsecurity_group(self, group_name, cidr_ip=None,
ec2_security_group_name=None,
ec2_security_group_owner_id=None):
"""
Add a new rule to an existing security group.
You need to pass in either ec2_security_group_name and
ec2_security_group_owner_id OR a CIDR block, but not both.
:type group_name: string
:param group_name: The name of the security group you are adding
the rule to.
:type ec2_security_group_name: string
:param ec2_security_group_name: The name of the EC2 security group
you are granting access to.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: The ID of the owner of the EC2
security group you are granting
access to.
:type cidr_ip: string
:param cidr_ip: The CIDR block you are providing access to.
See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing
:rtype: bool
:return: True if successful.
"""
params = {'DBSecurityGroupName': group_name}
if ec2_security_group_name:
params['EC2SecurityGroupName'] = ec2_security_group_name
if ec2_security_group_owner_id:
params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
if cidr_ip:
params['CIDRIP'] = urllib.quote(cidr_ip)
return self.get_object('AuthorizeDBSecurityGroupIngress', params,
DBSecurityGroup)
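# Illustrative sketch -- access can be granted either from a CIDR block or
# from an EC2 security group, not both (conn and all identifiers are assumed):
#
#     conn.authorize_dbsecurity_group('my-db-sg', cidr_ip='203.0.113.0/24')
#     conn.authorize_dbsecurity_group('my-db-sg',
#                                     ec2_security_group_name='web-servers',
#                                     ec2_security_group_owner_id='111122223333')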
def revoke_dbsecurity_group(self, group_name, ec2_security_group_name=None,
ec2_security_group_owner_id=None, cidr_ip=None):
"""
Remove an existing rule from an existing security group.
You need to pass in either ec2_security_group_name and
ec2_security_group_owner_id OR a CIDR block.
:type group_name: string
:param group_name: The name of the security group you are removing
the rule from.
:type ec2_security_group_name: string
:param ec2_security_group_name: The name of the EC2 security group
from which you are removing access.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: The ID of the owner of the EC2
security group from which you are
removing access.
:type cidr_ip: string
:param cidr_ip: The CIDR block from which you are removing access.
See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing
:rtype: bool
:return: True if successful.
"""
params = {'DBSecurityGroupName': group_name}
if ec2_security_group_name:
params['EC2SecurityGroupName'] = ec2_security_group_name
if ec2_security_group_owner_id:
params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
if cidr_ip:
params['CIDRIP'] = cidr_ip
return self.get_object('RevokeDBSecurityGroupIngress', params,
DBSecurityGroup)
# For backwards compatibility. This method was improperly named
# in previous versions. I have renamed it to match the others.
revoke_security_group = revoke_dbsecurity_group
# DBSnapshot methods
def get_all_dbsnapshots(self, snapshot_id=None, instance_id=None,
max_records=None, marker=None):
"""
Get information about DB Snapshots.
:type snapshot_id: str
:param snapshot_id: The unique identifier of an RDS snapshot.
If not provided, all RDS snapshots will be returned.
:type instance_id: str
:param instance_id: The identifier of a DBInstance. If provided,
only the DBSnapshots related to that instance will
be returned.
If not provided, all RDS snapshots will be returned.
:type max_records: int
:param max_records: The maximum number of records to be returned.
If more results are available, a MoreToken will
be returned in the response that can be used to
retrieve additional records. Default is 100.
:type marker: str
:param marker: The marker provided by a previous request.
:rtype: list
:return: A list of :class:`boto.rds.dbsnapshot.DBSnapshot`
"""
params = {}
if snapshot_id:
params['DBSnapshotIdentifier'] = snapshot_id
if instance_id:
params['DBInstanceIdentifier'] = instance_id
if max_records:
params['MaxRecords'] = max_records
if marker:
params['Marker'] = marker
return self.get_list('DescribeDBSnapshots', params,
[('DBSnapshot', DBSnapshot)])
def get_all_logs(self, dbinstance_id, max_records=None, marker=None, file_size=None, filename_contains=None, file_last_written=None):
"""
Get all log files
:type dbinstance_id: str
:param dbinstance_id: The identifier of a DBInstance.
:type max_records: int
:param max_records: Number of log file names to return.
:type marker: str
:param marker: The marker provided by a previous request.
:type file_size: int
:param file_size: Filter results to files larger than this size in bytes.
:type filename_contains: str
:param filename_contains: Filter results to files whose names contain this string.
:type file_last_written: int
:param file_last_written: Filter results to files written after this time (POSIX timestamp).
:rtype: list
:return: A list of :class:`boto.rds.logfile.LogFile`
"""
params = {'DBInstanceIdentifier': dbinstance_id}
if file_size:
params['FileSize'] = file_size
if filename_contains:
params['FilenameContains'] = filename_contains
if file_last_written:
params['FileLastWritten'] = file_last_written
if marker:
params['Marker'] = marker
if max_records:
params['MaxRecords'] = max_records
return self.get_list('DescribeDBLogFiles', params,
[('DescribeDBLogFilesDetails', LogFile)])
def get_log_file(self, dbinstance_id, log_file_name, marker=None, number_of_lines=None, max_records=None):
"""
Download a log file from RDS
:type dbinstance_id: str
:param dbinstance_id: The identifier of a DBInstance.
:type log_file_name: str
:param log_file_name: The name of the log file to retrieve
:type marker: str
:param marker: A marker returned from a previous call to this method, or 0 to indicate the start of file. If
no marker is specified, this will fetch log lines from the end of file instead.
:type number_of_lines: int
:param number_of_lines: The maximum number of lines to be returned.
"""
params = {
'DBInstanceIdentifier': dbinstance_id,
'LogFileName': log_file_name,
}
if marker:
params['Marker'] = marker
if number_of_lines:
params['NumberOfLines'] = number_of_lines
if max_records:
params['MaxRecords'] = max_records
logfile = self.get_object('DownloadDBLogFilePortion', params, LogFileObject)
if logfile:
logfile.log_filename = log_file_name
logfile.dbinstance_id = dbinstance_id
return logfile
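# Illustrative sketch (conn, instance and log file names are assumed); without
# a marker the tail of the file is returned, marker='0' starts from the top:
#
#     portion = conn.get_log_file('mydbinstance', 'error/mysql-error.log',
#                                 number_of_lines=100)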
def create_dbsnapshot(self, snapshot_id, dbinstance_id):
"""
Create a new DB snapshot.
:type snapshot_id: string
:param snapshot_id: The identifier for the DBSnapshot
:type dbinstance_id: string
:param dbinstance_id: The source identifier for the RDS instance from
which the snapshot is created.
:rtype: :class:`boto.rds.dbsnapshot.DBSnapshot`
:return: The newly created DBSnapshot
"""
params = {'DBSnapshotIdentifier': snapshot_id,
'DBInstanceIdentifier': dbinstance_id}
return self.get_object('CreateDBSnapshot', params, DBSnapshot)
def copy_dbsnapshot(self, source_snapshot_id, target_snapshot_id):
"""
Copies the specified DBSnapshot.
:type source_snapshot_id: string
:param source_snapshot_id: The identifier for the source DB snapshot.
:type target_snapshot_id: string
:param target_snapshot_id: The identifier for the copied snapshot.
:rtype: :class:`boto.rds.dbsnapshot.DBSnapshot`
:return: The newly created DBSnapshot.
"""
params = {'SourceDBSnapshotIdentifier': source_snapshot_id,
'TargetDBSnapshotIdentifier': target_snapshot_id}
return self.get_object('CopyDBSnapshot', params, DBSnapshot)
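# Illustrative sketch (conn, snapshot and instance identifiers are assumed):
#
#     snap = conn.create_dbsnapshot('mydb-snap-2014-01-01', 'mydbinstance')
#     copy = conn.copy_dbsnapshot('mydb-snap-2014-01-01', 'mydb-snap-copy')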
def delete_dbsnapshot(self, identifier):
"""
Delete a DBSnapshot
:type identifier: string
:param identifier: The identifier of the DBSnapshot to delete
"""
params = {'DBSnapshotIdentifier': identifier}
return self.get_object('DeleteDBSnapshot', params, DBSnapshot)
def restore_dbinstance_from_dbsnapshot(self, identifier, instance_id,
instance_class, port=None,
availability_zone=None,
multi_az=None,
auto_minor_version_upgrade=None,
db_subnet_group_name=None):
"""
Create a new DBInstance from a DB snapshot.
:type identifier: string
:param identifier: The identifier for the DBSnapshot
:type instance_id: string
:param instance_id: The identifier for the new DBInstance that will be
created from the snapshot.
:type instance_class: str
:param instance_class: The compute and memory capacity of the
DBInstance. Valid values are:
db.m1.small | db.m1.large | db.m1.xlarge |
db.m2.2xlarge | db.m2.4xlarge
:type port: int
:param port: Port number on which database accepts connections.
Valid values [1115-65535]. Defaults to 3306.
:type availability_zone: str
:param availability_zone: Name of the availability zone to place
DBInstance into.
:type multi_az: bool
:param multi_az: If True, specifies the DB Instance will be
deployed in multiple availability zones.
Default is the API default.
:type auto_minor_version_upgrade: bool
:param auto_minor_version_upgrade: Indicates that minor engine
upgrades will be applied
automatically to the Read Replica
during the maintenance window.
Default is the API default.
:type db_subnet_group_name: str
:param db_subnet_group_name: A DB Subnet Group to associate with this DB Instance.
If there is no DB Subnet Group, then it is a non-VPC DB
instance.
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The newly created DBInstance
"""
params = {'DBSnapshotIdentifier': identifier,
'DBInstanceIdentifier': instance_id,
'DBInstanceClass': instance_class}
if port:
params['Port'] = port
if availability_zone:
params['AvailabilityZone'] = availability_zone
if multi_az is not None:
params['MultiAZ'] = str(multi_az).lower()
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(auto_minor_version_upgrade).lower()
if db_subnet_group_name is not None:
params['DBSubnetGroupName'] = db_subnet_group_name
return self.get_object('RestoreDBInstanceFromDBSnapshot',
params, DBInstance)
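# Illustrative sketch (conn and identifiers are assumed):
#
#     restored = conn.restore_dbinstance_from_dbsnapshot(
#         'mydb-final-snapshot', 'mydb-restored', 'db.m1.small', multi_az=True)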
def restore_dbinstance_from_point_in_time(self, source_instance_id,
target_instance_id,
use_latest=False,
restore_time=None,
dbinstance_class=None,
port=None,
availability_zone=None,
db_subnet_group_name=None):
"""
Create a new DBInstance from a point in time.
:type source_instance_id: string
:param source_instance_id: The identifier for the source DBInstance.
:type target_instance_id: string
:param target_instance_id: The identifier of the new DBInstance.
:type use_latest: bool
:param use_latest: If True, the latest restorable time will
be used.
:type restore_time: datetime
:param restore_time: The date and time to restore from. Only
used if use_latest is False.
:type dbinstance_class: str
:param dbinstance_class: The compute and memory capacity of the
DBInstance. Valid values are:
db.m1.small | db.m1.large | db.m1.xlarge |
db.m2.2xlarge | db.m2.4xlarge
:type port: int
:param port: Port number on which database accepts connections.
Valid values [1115-65535]. Defaults to 3306.
:type availability_zone: str
:param availability_zone: Name of the availability zone to place
DBInstance into.
:type db_subnet_group_name: str
:param db_subnet_group_name: A DB Subnet Group to associate with this DB Instance.
If there is no DB Subnet Group, then it is a non-VPC DB
instance.
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The newly created DBInstance
"""
params = {'SourceDBInstanceIdentifier': source_instance_id,
'TargetDBInstanceIdentifier': target_instance_id}
if use_latest:
params['UseLatestRestorableTime'] = 'true'
elif restore_time:
params['RestoreTime'] = restore_time.isoformat()
if dbinstance_class:
params['DBInstanceClass'] = dbinstance_class
if port:
params['Port'] = port
if availability_zone:
params['AvailabilityZone'] = availability_zone
if db_subnet_group_name is not None:
params['DBSubnetGroupName'] = db_subnet_group_name
return self.get_object('RestoreDBInstanceToPointInTime',
params, DBInstance)
# Events
def get_all_events(self, source_identifier=None, source_type=None,
start_time=None, end_time=None,
max_records=None, marker=None):
"""
Get information about events related to your DBInstances,
DBSecurityGroups and DBParameterGroups.
:type source_identifier: str
:param source_identifier: If supplied, the events returned will be
limited to those that apply to the identified
source. The value of this parameter depends
on the value of source_type. If neither
parameter is specified, all events in the time
span will be returned.
:type source_type: str
:param source_type: Specifies how the source_identifier should
be interpreted. Valid values are:
b-instance | db-security-group |
db-parameter-group | db-snapshot
:type start_time: datetime
:param start_time: The beginning of the time interval for events.
If not supplied, all available events will
be returned.
:type end_time: datetime
:param end_time: The ending of the time interval for events.
If not supplied, all available events will
be returned.
:type max_records: int
:param max_records: The maximum number of records to be returned.
If more results are available, a MoreToken will
be returned in the response that can be used to
retrieve additional records. Default is 100.
:type marker: str
:param marker: The marker provided by a previous request.
:rtype: list
:return: A list of :class:`boto.rds.event.Event`
"""
params = {}
if source_identifier and source_type:
params['SourceIdentifier'] = source_identifier
params['SourceType'] = source_type
if start_time:
params['StartTime'] = start_time.isoformat()
if end_time:
params['EndTime'] = end_time.isoformat()
if max_records:
params['MaxRecords'] = max_records
if marker:
params['Marker'] = marker
return self.get_list('DescribeEvents', params, [('Event', Event)])
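# Illustrative sketch -- list the last day of events for one instance
# (conn and the identifier are assumed):
#
#     import datetime
#     end = datetime.datetime.utcnow()
#     start = end - datetime.timedelta(days=1)
#     events = conn.get_all_events(source_identifier='mydbinstance',
#                                  source_type='db-instance',
#                                  start_time=start, end_time=end)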
def create_db_subnet_group(self, name, desc, subnet_ids):
"""
Create a new Database Subnet Group.
:type name: string
:param name: The identifier for the db_subnet_group
:type desc: string
:param desc: A description of the db_subnet_group
:type subnet_ids: list
:param subnet_ids: A list of the subnet identifiers to include in the
db_subnet_group
:rtype: :class:`boto.rds.dbsubnetgroup.DBSubnetGroup`
:return: The created db_subnet_group
"""
params = {'DBSubnetGroupName': name,
'DBSubnetGroupDescription': desc}
self.build_list_params(params, subnet_ids, 'SubnetIds.member')
return self.get_object('CreateDBSubnetGroup', params, DBSubnetGroup)
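# Illustrative sketch (conn and subnet identifiers are assumed):
#
#     sg = conn.create_db_subnet_group('my-subnet-group',
#                                      'Subnets for the app VPC',
#                                      ['subnet-11111111', 'subnet-22222222'])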
def delete_db_subnet_group(self, name):
"""
Delete a Database Subnet Group.
:type name: string
:param name: The identifier of the db_subnet_group to delete
:rtype: :class:`boto.rds.dbsubnetgroup.DBSubnetGroup`
:return: The deleted db_subnet_group.
"""
params = {'DBSubnetGroupName': name}
return self.get_object('DeleteDBSubnetGroup', params, DBSubnetGroup)
def get_all_db_subnet_groups(self, name=None, max_records=None, marker=None):
"""
Retrieve all the DBSubnetGroups in your account.
:type name: str
:param name: DBSubnetGroup name. If supplied, only information about
this DBSubnetGroup will be returned. Otherwise, info
about all DBSubnetGroups will be returned.
:type max_records: int
:param max_records: The maximum number of records to be returned.
If more results are available, a Token will be
returned in the response that can be used to
retrieve additional records. Default is 100.
:type marker: str
:param marker: The marker provided by a previous request.
:rtype: list
:return: A list of :class:`boto.rds.dbsubnetgroup.DBSubnetGroup`
"""
params = dict()
if name is not None:
params['DBSubnetGroupName'] = name
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self.get_list('DescribeDBSubnetGroups', params, [('DBSubnetGroup', DBSubnetGroup)])
def modify_db_subnet_group(self, name, description=None, subnet_ids=None):
"""
Modify an existing Database Subnet Group.
:type name: string
:param name: The identifier of the db_subnet_group to modify
:type description: string
:param description: A new description for the db_subnet_group
:type subnet_ids: list
:param subnet_ids: A new list of subnet identifiers to include in the
db_subnet_group
:rtype: :class:`boto.rds.dbsubnetgroup.DBSubnetGroup`
:return: The modified db_subnet_group
"""
params = {'DBSubnetGroupName': name}
if description is not None:
params['DBSubnetGroupDescription'] = description
if subnet_ids is not None:
self.build_list_params(params, subnet_ids, 'SubnetIds.member')
return self.get_object('ModifyDBSubnetGroup', params, DBSubnetGroup)
def create_option_group(self, name, engine_name, major_engine_version,
description=None):
"""
Create a new option group for your account.
This will create the option group within the region you
are currently connected to.
:type name: string
:param name: The name of the new option group
:type engine_name: string
:param engine_name: Specifies the name of the engine that this option
group should be associated with.
:type major_engine_version: string
:param major_engine_version: Specifies the major version of the engine
that this option group should be
associated with.
:type description: string
:param description: The description of the new option group
:rtype: :class:`boto.rds.optiongroup.OptionGroup`
:return: The newly created OptionGroup
"""
params = {
'OptionGroupName': name,
'EngineName': engine_name,
'MajorEngineVersion': major_engine_version,
'OptionGroupDescription': description,
}
group = self.get_object('CreateOptionGroup', params, OptionGroup)
group.name = name
group.engine_name = engine_name
group.major_engine_version = major_engine_version
group.description = description
return group
def delete_option_group(self, name):
"""
Delete an OptionGroup from your account.
:type name: string
:param name: The name of the OptionGroup to delete
"""
params = {'OptionGroupName': name}
return self.get_status('DeleteOptionGroup', params)
def describe_option_groups(self, name=None, engine_name=None,
major_engine_version=None, max_records=100,
marker=None):
"""
Describes the available option groups.
:type name: str
:param name: The name of the option group to describe. Cannot be
supplied together with engine_name or major_engine_version.
:type engine_name: str
:param engine_name: Filters the list of option groups to only include
groups associated with a specific database engine.
:type major_engine_version: str
:param major_engine_version: Filters the list of option groups to only
include groups associated with a specific
database engine version. If specified, then
engine_name must also be specified.
:type max_records: int
:param max_records: The maximum number of records to be returned.
If more results are available, a MoreToken will
be returned in the response that can be used to
retrieve additional records. Default is 100.
:type marker: str
:param marker: The marker provided by a previous request.
:rtype: list
:return: A list of :class:`boto.rds.optiongroup.OptionGroup`
"""
params = {}
if name:
params['OptionGroupName'] = name
elif engine_name and major_engine_version:
params['EngineName'] = engine_name
params['MajorEngineVersion'] = major_engine_version
if max_records:
params['MaxRecords'] = int(max_records)
if marker:
params['Marker'] = marker
return self.get_list('DescribeOptionGroups', params, [
('OptionGroup', OptionGroup)
])
def describe_option_group_options(self, engine_name=None,
major_engine_version=None, max_records=100,
marker=None):
"""
Describes the available option group options.
:type engine_name: str
:param engine_name: Filters the list of option groups to only include
groups associated with a specific database engine.
:type major_engine_version: str
:param major_engine_version: Filters the list of option groups to only
include groups associated with a specific
database engine version. If specified, then
engine_name must also be specified.
:type max_records: int
:param max_records: The maximum number of records to be returned.
If more results are available, a MoreToken will
be returned in the response that can be used to
retrieve additional records. Default is 100.
:type marker: str
:param marker: The marker provided by a previous request.
:rtype: list
:return: A list of :class:`boto.rds.optiongroup.Option`
"""
params = {}
if engine_name and major_engine_version:
params['EngineName'] = engine_name
params['MajorEngineVersion'] = major_engine_version
if max_records:
params['MaxRecords'] = int(max_records)
if marker:
params['Marker'] = marker
return self.get_list('DescribeOptionGroupOptions', params, [
('OptionGroupOptions', OptionGroupOption)
])
|
eeshangarg/zulip
|
refs/heads/master
|
zerver/migrations/0279_message_recipient_subject_indexes.py
|
5
|
# Generated by Django 2.2.12 on 2020-04-30 00:35
from django.db import migrations
class Migration(migrations.Migration):
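# "CREATE INDEX CONCURRENTLY" cannot run inside a transaction block, so this
# migration must opt out of Django's default per-migration transaction.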
atomic = False
dependencies = [
("zerver", "0278_remove_userprofile_alert_words"),
]
operations = [
migrations.RunSQL(
"""
CREATE INDEX CONCURRENTLY IF NOT EXISTS zerver_message_recipient_upper_subject ON zerver_message (recipient_id, upper(subject::text), id DESC NULLS LAST);
"""
),
migrations.RunSQL(
"""
CREATE INDEX CONCURRENTLY IF NOT EXISTS zerver_message_recipient_subject ON zerver_message (recipient_id, subject, id DESC NULLS LAST);
"""
),
]
|
timrudge/Platypus
|
refs/heads/master
|
ConvertPlateReader.py
|
1
|
import sys
import os
import datetime
import csv
usage = """usage:
ConvertPlateReader.py infile outfile row-name*
"""
def find_in_sublists(lst, value):
for sub_i, sublist in enumerate(lst):
try:
return (sub_i, sublist.index(value))
except ValueError:
pass
raise ValueError('%s is not in lists' % value)
if __name__=='__main__':
filename = sys.argv[1]
outfile = sys.argv[2]
rownames = sys.argv[3:]
nsets = len(rownames)
rawdata = list(csv.reader(open(filename, 'rU')))
# Work out location of header row, and column/row numbers
try:
(headerrow,ccol) = find_in_sublists(rawdata,'Well\nCol')
(headerrow,crow) = find_in_sublists(rawdata,'Well\nRow')
except ValueError:
(headerrow,ccol) = find_in_sublists(rawdata,'Well Col')
(headerrow,crow) = find_in_sublists(rawdata,'Well Row')
# Time is on row below headers
timerow = headerrow+1
# Data is on rows below time
datarow = timerow+1
# Find where the data is in each row by looking for 1st 'Raw Data'
headers = rawdata[headerrow]
# print headers
cdata = (i for i,v in enumerate(headers) if 'Raw Data' in v).next()
# print cdata
r = rawdata[datarow:]
r1 = r[1]
# print r1[cdata:]
# for d in r1[cdata:]:
# print map(float, d)
# Pull out row,col and data for each row
# Try combinations of alpha/numeric for row/col
try:
rows = [(ord(row[crow].upper())-64, int(row[ccol]), map(float, row[cdata:])) for row in rawdata[datarow:]]
except ValueError:
rows = [(int(row[ccol]), ord(row[crow].upper())-64, map(float, row[cdata:])) for row in rawdata[datarow:]]
# Pull out time of each data point
times = rawdata[timerow]
times = times[cdata:]
assert len(rows[0][2]) % nsets == 0
nsteps = len(rows[0][2]) / nsets
data = [['','row','col','t'] + rownames]
n = 1
for row, col, vals in rows:
for i in range(nsteps):
t = float(times[i])*60
line = [n, row, col, t]
for j in range(nsets):
k = j*nsteps + i
line.append(vals[k])
data.append(line)
n = n+1
outf = open(outfile, 'wb')
outf.write("# Automatically generated by ConvertPlateReader.py\n")
outf.write("# Source: " + filename + "\n")
outf.write("# Row Names: " + str(rownames) + "\n")
outf.write("# Date: " + str(datetime.datetime.now()) + "\n")
writer = csv.writer(outf, dialect='excel')
writer.writerows(data)
|
cts2/rf2db
|
refs/heads/master
|
bin/__init__.py
|
8
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Mayo Clinic
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the <ORGANIZATION> nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
|
sixu05202004/newsmeme
|
refs/heads/master
|
newsmeme-blueprints/newsmeme/newsmeme/apps/comment/forms.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask_wtf import Form
from wtforms import TextAreaField, SubmitField
from wtforms.validators import Required
from flask.ext.babel import lazy_gettext as _
class CommentForm(Form):
comment = TextAreaField(validators=[
Required(message=_("Comment is required"))])
submit = SubmitField(_("Save"))
cancel = SubmitField(_("Cancel"))
class CommentAbuseForm(Form):
complaint = TextAreaField("Complaint", validators=[
Required(message=_("You must enter the details"
" of the complaint"))])
submit = SubmitField(_("Send"))
|
asm-products/cloudroutes-service
|
refs/heads/master
|
src/web/user/__init__.py
|
12133432
| |
github-account-because-they-want-it/django-allauth
|
refs/heads/master
|
allauth/socialaccount/providers/google/__init__.py
|
12133432
| |
hryamzik/ansible
|
refs/heads/devel
|
lib/ansible/modules/remote_management/__init__.py
|
12133432
| |
unseenlaser/python-for-android
|
refs/heads/master
|
python-build/python-libs/gdata/samples/oauth/oauth_on_appengine/appengine_utilities/__init__.py
|
12133432
| |
gangadhar-kadam/verve-erp
|
refs/heads/v5.0
|
erpnext/hr/doctype/deduction_type/deduction_type.py
|
41
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class DeductionType(Document):
pass
|
emorozov/django-basic-apps
|
refs/heads/master
|
basic/profiles/views.py
|
10
|
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import Http404, HttpResponseRedirect
from django.views.generic import list_detail
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from basic.profiles.models import *
from basic.profiles.forms import *
def profile_list(request):
return list_detail.object_list(
request,
queryset=Profile.objects.all(),
paginate_by=20,
)
profile_list.__doc__ = list_detail.object_list.__doc__
def profile_detail(request, username):
try:
user = User.objects.get(username__iexact=username)
except User.DoesNotExist:
raise Http404
profile = Profile.objects.get(user=user)
context = { 'object':profile }
return render_to_response('profiles/profile_detail.html', context, context_instance=RequestContext(request))
@login_required
def profile_edit(request, template_name='profiles/profile_form.html'):
"""Edit profile."""
if request.POST:
profile = Profile.objects.get(user=request.user)
profile_form = ProfileForm(request.POST, request.FILES, instance=profile)
user_form = UserForm(request.POST, instance=request.user)
service_formset = ServiceFormSet(request.POST, instance=profile)
link_formset = LinkFormSet(request.POST, instance=profile)
if profile_form.is_valid() and user_form.is_valid() and service_formset.is_valid() and link_formset.is_valid():
profile_form.save()
user_form.save()
service_formset.save()
link_formset.save()
return HttpResponseRedirect(reverse('profile_detail', kwargs={'username': request.user.username}))
else:
context = {
'profile_form': profile_form,
'user_form': user_form,
'service_formset': service_formset,
'link_formset': link_formset
}
else:
profile = Profile.objects.get(user=request.user)
service_formset = ServiceFormSet(instance=profile)
link_formset = LinkFormSet(instance=profile)
context = {
'profile_form': ProfileForm(instance=profile),
'user_form': UserForm(instance=request.user),
'service_formset': service_formset,
'link_formset': link_formset
}
return render_to_response(template_name, context, context_instance=RequestContext(request))
|
trezorg/django
|
refs/heads/master
|
django/conf/locale/fy_NL/formats.py
|
1293
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# DATE_FORMAT =
# TIME_FORMAT =
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
# MONTH_DAY_FORMAT =
# SHORT_DATE_FORMAT =
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
# DECIMAL_SEPARATOR =
# THOUSAND_SEPARATOR =
# NUMBER_GROUPING =
|
FHannes/intellij-community
|
refs/heads/master
|
python/testData/refactoring/changeSignature/keywordOnlyMove.after.py
|
83
|
def f<caret>(param2, *, param1):
pass
f(param2=2, param1=1)
|
ettm2012/MissionPlanner
|
refs/heads/master
|
Lib/site-packages/numpy/core/src/multiarray/testcalcs.py
|
59
|
from scipy import weave
class YMD(object):
year = 0
month = 0
days = 0
month_offset = [
[ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 ],
[ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 ]
]
days_in_month = [
[ 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 ],
[ 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 ]
]
def is_leapyear(year):
return (year % 4 == 0) & ((year % 100 != 0) | (year % 400 == 0))
# Return the year offset, that is the absolute date of the day
# 31.12.(year-1) since 31.12.1969 in the proleptic Gregorian calendar.
def year_offset(year):
code = """
year-=1970;
if ((year+1969) >= 0 || -1/4 == -1)
return_val = year*365 + year/4 - year/100 + year/400;
else
return_val = year*365 + (year-3)/4 - (year-99)/100 + (year-399)/400;
"""
return weave.inline(code,['year'])
def days_from_ymd(year, month, day):
leap = is_leapyear(year)
# Negative month values indicate months relative to the years end */
if (month < 0): month += 13
if not (month >= 1 and month<=12):
raise ValueError, "month out of range (1-21): %d" % month
# Negative values indicate days relative to the months end */
if (day < 0): day += days_in_month[leap][month - 1] + 1
if not (day >= 1 and day <= days_in_month[leap][month-1]):
raise ValueError, "day out of range: %d" % day
# Number of days between Dec 31, (year - 1) and Dec 31, 1969
# (can be negative).
#
yearoffset = year_offset(year);
# Calculate the number of days using yearoffset */
# Jan 1, 1970 is day 0 and thus Dec. 31, 1969 is day -1 */
absdate = day-1 + month_offset[leap][month - 1] + yearoffset;
return absdate;
def ymd_from_days(days):
ymd = YMD()
year = 1970 + days / 365.2425
|
isaac-philip/loolu
|
refs/heads/master
|
common/django/contrib/localflavor/ro/ro_counties.py
|
428
|
# -*- coding: utf-8 -*-
"""
A list of Romanian counties as `choices` in a formfield.
This exists as a standalone file so that it's only imported into memory when
explicitly needed.
"""
COUNTIES_CHOICES = (
('AB', u'Alba'),
('AR', u'Arad'),
('AG', u'Argeş'),
('BC', u'Bacău'),
('BH', u'Bihor'),
('BN', u'Bistriţa-Năsăud'),
('BT', u'Botoşani'),
('BV', u'Braşov'),
('BR', u'Brăila'),
('B', u'Bucureşti'),
('BZ', u'Buzău'),
('CS', u'Caraş-Severin'),
('CL', u'Călăraşi'),
('CJ', u'Cluj'),
('CT', u'Constanţa'),
('CV', u'Covasna'),
('DB', u'Dâmboviţa'),
('DJ', u'Dolj'),
('GL', u'Galaţi'),
('GR', u'Giurgiu'),
('GJ', u'Gorj'),
('HR', u'Harghita'),
('HD', u'Hunedoara'),
('IL', u'Ialomiţa'),
('IS', u'Iaşi'),
('IF', u'Ilfov'),
('MM', u'Maramureş'),
('MH', u'Mehedinţi'),
('MS', u'Mureş'),
('NT', u'Neamţ'),
('OT', u'Olt'),
('PH', u'Prahova'),
('SM', u'Satu Mare'),
('SJ', u'Sălaj'),
('SB', u'Sibiu'),
('SV', u'Suceava'),
('TR', u'Teleorman'),
('TM', u'Timiş'),
('TL', u'Tulcea'),
('VS', u'Vaslui'),
('VL', u'Vâlcea'),
('VN', u'Vrancea'),
)
|
hazrpg/calibre
|
refs/heads/master
|
src/html5lib/trie/datrie.py
|
14
|
from __future__ import absolute_import, division, unicode_literals
from datrie import Trie as DATrie
try:
text_type = unicode
except NameError:
text_type = str
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
def __init__(self, data):
chars = set()
for key in data.keys():
if not isinstance(key, text_type):
raise TypeError("All keys must be strings")
for char in key:
chars.add(char)
self._data = DATrie("".join(chars))
for key, value in data.items():
self._data[key] = value
def __contains__(self, key):
return key in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
raise NotImplementedError()
def __getitem__(self, key):
return self._data[key]
def keys(self, prefix=None):
return self._data.keys(prefix)
def has_keys_with_prefix(self, prefix):
return self._data.has_keys_with_prefix(prefix)
def longest_prefix(self, prefix):
return self._data.longest_prefix(prefix)
def longest_prefix_item(self, prefix):
return self._data.longest_prefix_item(prefix)
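# Illustrative usage sketch (keys below are assumed); the wrapper builds the
# datrie alphabet from the characters of the supplied keys:
#
#     frequencies = Trie({u"abc": 0, u"abd": 1})
#     frequencies.has_keys_with_prefix(u"ab")   # True
#     frequencies.longest_prefix(u"abcde")      # u"abc"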
|
swannapa/erpnext
|
refs/heads/develop
|
erpnext/docs/user/manual/de/human-resources/setup/__init__.py
|
12133432
| |
ghedsouza/django
|
refs/heads/master
|
tests/migrations/migrations_test_apps/lookuperror_a/__init__.py
|
12133432
| |
RossBrunton/django
|
refs/heads/master
|
tests/migrations/migrations_test_apps/alter_fk/__init__.py
|
12133432
| |
farodin91/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/websockets/handlers/echo_exit_wsh.py
|
258
|
#!/usr/bin/python
from mod_pywebsocket import msgutil
_GOODBYE_MESSAGE = u'Goodbye'
def web_socket_do_extra_handshake(request):
# This example handler accepts any request. See origin_check_wsh.py for how
# to reject access from untrusted scripts based on origin value.
pass # Always accept.
def web_socket_transfer_data(request):
while True:
line = request.ws_stream.receive_message()
if line is None:
return
if isinstance(line, unicode):
if line == _GOODBYE_MESSAGE:
return
|
anant-dev/django
|
refs/heads/master
|
django/db/backends/postgresql/utils.py
|
682
|
from django.utils.timezone import utc
def utc_tzinfo_factory(offset):
if offset != 0:
raise AssertionError("database connection isn't set to UTC")
return utc
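# Illustrative note (a cursor object is assumed): psycopg2 calls a cursor's
# tzinfo_factory with the connection's UTC offset when reading TIMESTAMPTZ
# columns, so assigning this function (as the Django backend does) enforces
# that the database session runs in UTC:
#
#     cursor.tzinfo_factory = utc_tzinfo_factory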
|
cloudfoundry/php-buildpack-legacy
|
refs/heads/master
|
builds/runtimes/python-2.7.6/lib/python2.7/distutils/tests/test_cmd.py
|
54
|
"""Tests for distutils.cmd."""
import unittest
import os
from test.test_support import captured_stdout, run_unittest
from distutils.cmd import Command
from distutils.dist import Distribution
from distutils.errors import DistutilsOptionError
from distutils import debug
class MyCmd(Command):
def initialize_options(self):
pass
class CommandTestCase(unittest.TestCase):
def setUp(self):
dist = Distribution()
self.cmd = MyCmd(dist)
def test_ensure_string_list(self):
cmd = self.cmd
cmd.not_string_list = ['one', 2, 'three']
cmd.yes_string_list = ['one', 'two', 'three']
cmd.not_string_list2 = object()
cmd.yes_string_list2 = 'ok'
cmd.ensure_string_list('yes_string_list')
cmd.ensure_string_list('yes_string_list2')
self.assertRaises(DistutilsOptionError,
cmd.ensure_string_list, 'not_string_list')
self.assertRaises(DistutilsOptionError,
cmd.ensure_string_list, 'not_string_list2')
cmd.option1 = 'ok,dok'
cmd.ensure_string_list('option1')
self.assertEqual(cmd.option1, ['ok', 'dok'])
cmd.option2 = ['xxx', 'www']
cmd.ensure_string_list('option2')
cmd.option3 = ['ok', 2]
self.assertRaises(DistutilsOptionError, cmd.ensure_string_list,
'option3')
def test_make_file(self):
cmd = self.cmd
# making sure it raises when infiles is not a string or a list/tuple
self.assertRaises(TypeError, cmd.make_file,
infiles=1, outfile='', func='func', args=())
# making sure execute gets called properly
def _execute(func, args, exec_msg, level):
self.assertEqual(exec_msg, 'generating out from in')
cmd.force = True
cmd.execute = _execute
cmd.make_file(infiles='in', outfile='out', func='func', args=())
def test_dump_options(self):
msgs = []
def _announce(msg, level):
msgs.append(msg)
cmd = self.cmd
cmd.announce = _announce
cmd.option1 = 1
cmd.option2 = 1
cmd.user_options = [('option1', '', ''), ('option2', '', '')]
cmd.dump_options()
wanted = ["command options for 'MyCmd':", ' option1 = 1',
' option2 = 1']
self.assertEqual(msgs, wanted)
def test_ensure_string(self):
cmd = self.cmd
cmd.option1 = 'ok'
cmd.ensure_string('option1')
cmd.option2 = None
cmd.ensure_string('option2', 'xxx')
self.assertTrue(hasattr(cmd, 'option2'))
cmd.option3 = 1
self.assertRaises(DistutilsOptionError, cmd.ensure_string, 'option3')
def test_ensure_filename(self):
cmd = self.cmd
cmd.option1 = __file__
cmd.ensure_filename('option1')
cmd.option2 = 'xxx'
self.assertRaises(DistutilsOptionError, cmd.ensure_filename, 'option2')
def test_ensure_dirname(self):
cmd = self.cmd
cmd.option1 = os.path.dirname(__file__) or os.curdir
cmd.ensure_dirname('option1')
cmd.option2 = 'xxx'
self.assertRaises(DistutilsOptionError, cmd.ensure_dirname, 'option2')
def test_debug_print(self):
cmd = self.cmd
with captured_stdout() as stdout:
cmd.debug_print('xxx')
stdout.seek(0)
self.assertEqual(stdout.read(), '')
debug.DEBUG = True
try:
with captured_stdout() as stdout:
cmd.debug_print('xxx')
stdout.seek(0)
self.assertEqual(stdout.read(), 'xxx\n')
finally:
debug.DEBUG = False
def test_suite():
return unittest.makeSuite(CommandTestCase)
if __name__ == '__main__':
run_unittest(test_suite())
|
kinsamanka/linux
|
refs/heads/rpi-3.6.y
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
|
12980
|
# SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
|
tclose/PyPe9
|
refs/heads/master
|
pype9/simulate/common/code_gen/base.py
|
2
|
"""
This module defines common methods used in simulator specific build modules
@author Tom Close
"""
##########################################################################
#
# Copyright 2011 Okinawa Institute of Science and Technology (OIST), Okinawa
#
##########################################################################
from __future__ import absolute_import
from builtins import object
from future.utils import PY3
import platform
import os
import subprocess as sp
import time
from itertools import chain
from copy import deepcopy
import shutil
from os.path import join
from jinja2 import Environment, FileSystemLoader, StrictUndefined
from future.utils import with_metaclass
from abc import ABCMeta, abstractmethod
import sympy
from nineml import units
from nineml.exceptions import NineMLNameError, NineMLSerializationError
from pype9.exceptions import (
Pype9BuildError, Pype9CommandNotFoundError, Pype9RuntimeError)
from ..cells.with_synapses import read
import pype9.annotations
from pype9.annotations import PYPE9_NS, BUILD_PROPS
from os.path import expanduser
import re
from nineml.serialization import url_re
import sysconfig
from pype9 import __version__
from pype9.utils.paths import remove_ignore_missing
from pype9.utils.logging import logger
BASE_BUILD_DIR = os.path.join(
expanduser("~"),
'.pype9',
'build',
'v{}'.format(__version__),
'python{}'.format(sysconfig.get_config_var('py_version')))
class BaseCodeGenerator(with_metaclass(ABCMeta, object)):
"""
Parameters
----------
base_dir : str | None
The base directory for the generated code. If None a directory
will be created in user's home directory.
"""
BUILD_MODE_OPTIONS = ['lazy', # Build iff source has been updated
'force', # Build always
'require', # Don't build, requires pre-built
'build_only', # Only build
'generate_only', # Only generate source files
'purge' # Remove all configure files and rebuild
]
_PARAMS_DIR = 'params'
_SRC_DIR = 'src'
_INSTL_DIR = 'install'
_CMPL_DIR = 'compile' # Ignored for NEURON but used for NEST
_BUILT_COMP_CLASS = 'built_component_class.xml'
# Python functions and annotations to be made available in the templates
_globals = dict(
[('len', len), ('zip', zip), ('enumerate', enumerate),
('range', range), ('next', next), ('chain', chain), ('sorted',
sorted), ('hash', hash), ('deepcopy', deepcopy), ('units', units),
('hasattr', hasattr), ('set', set), ('list', list), ('None', None),
('sympy', sympy)] +
[(n, v) for n, v in list(pype9.annotations.__dict__.items())
if n != '__builtins__'])
# Derived classes should provide mapping from 9ml dimensions to default
# units
DEFAULT_UNITS = {}
def __init__(self, base_dir=None, **kwargs): # @UnusedVariable
if base_dir is None:
base_dir = BASE_BUILD_DIR
self._base_dir = os.path.join(
base_dir, self.SIMULATOR_NAME + self.SIMULATOR_VERSION)
def __repr__(self):
return "{}CodeGenerator(base_dir='{}')".format(
self.SIMULATOR_NAME.capitalize(), self.base_dir)
def __eq__(self, other):
try:
return (self.SIMULATOR_NAME == other.SIMULATOR_NAME and
self.base_dir == other.base_dir)
except AttributeError:
return False
def __ne__(self, other):
return not self.__eq__(other)
@property
def base_dir(self):
return self._base_dir
@abstractmethod
def generate_source_files(self, dynamics, src_dir, name, **kwargs):
"""
Generates the source files for the relevant simulator
"""
pass
def configure_build_files(self, name, src_dir, compile_dir, install_dir,
**kwargs):
"""
Configures the build files before compiling
"""
pass
@abstractmethod
def compile_source_files(self, compile_dir, name):
pass
def generate(self, component_class, build_mode='lazy', url=None, **kwargs):
"""
Generates and builds the required simulator-specific files for a given
NineML cell class
Parameters
----------
component_class : nineml.Dynamics
9ML Dynamics object
name : str
Name of the generated cell class
install_dir : str
Path to the directory where the NMODL files
will be generated and compiled
build_mode : str
Available build options:
lazy - only build if files are modified
force - always generate and build
purge - remove all config files, generate and rebuild
require - require built binaries are present
build_only - build and then quit
generate_only - generate src and then quit
recompile - don't generate src but compile
build_version : str
A suffix appended to the cell build name to distinguish
it from other code generated from the component class
url : str
The URL where the component class is stored (used to form the
build path)
kwargs : dict
A dictionary of (potentially simulator- specific) template
arguments
"""
# Save original working directory to reinstate it afterwards (just to
# be polite)
name = component_class.name
orig_dir = os.getcwd()
if url is None:
url = component_class.url
# Calculate compile directory path within build directory
src_dir = self.get_source_dir(name, url)
compile_dir = self.get_compile_dir(name, url)
install_dir = self.get_install_dir(name, url)
# Path of the build component class
built_comp_class_pth = os.path.join(src_dir, self._BUILT_COMP_CLASS)
# Determine whether the installation needs rebuilding or whether there
# is an existing library module to use.
if build_mode == 'purge':
remove_ignore_missing(src_dir)
remove_ignore_missing(install_dir)
remove_ignore_missing(compile_dir)
generate_source = compile_source = True
elif build_mode in ('force', 'build_only'): # Force build
generate_source = compile_source = True
elif build_mode == 'require': # Just check that prebuilt binaries are present
generate_source = compile_source = False
if not os.path.exists(install_dir):
raise Pype9BuildError(
"Prebuilt installation directory '{}' is not "
"present, and is required for 'require' build option"
.format(install_dir))
elif build_mode == 'generate_only': # Only generate
generate_source = True
compile_source = False
elif build_mode == 'lazy': # Generate if source has been modified
compile_source = True
if not os.path.exists(built_comp_class_pth):
generate_source = True
else:
try:
built_component_class = read(built_comp_class_pth)[name]
if built_component_class.equals(component_class,
annotations_ns=[PYPE9_NS]):
generate_source = False
logger.info("Found existing source in '{}' directory, "
"code generation skipped (set 'build_mode'"
" argument to 'force' or 'build_only' to "
"enforce regeneration)".format(src_dir))
else:
generate_source = True
logger.info("Found existing source in '{}' directory, "
"but the component classes differ so "
"regenerating sources".format(src_dir))
except (NineMLNameError, NineMLSerializationError):
generate_source = True
logger.info("Found existing source in '{}' directory, "
"but could not find '{}' component class so "
"regenerating sources".format(name, src_dir))
else:
raise Pype9BuildError(
"Unrecognised build option '{}', must be one of ('{}')"
.format(build_mode, "', '".join(self.BUILD_MODE_OPTIONS)))
# Generate source files from NineML code
if generate_source:
self.clean_src_dir(src_dir, name)
self.generate_source_files(
name=name,
component_class=component_class,
src_dir=src_dir,
compile_dir=compile_dir,
install_dir=install_dir,
**kwargs)
component_class.write(built_comp_class_pth,
preserve_order=True, version=2.0)
if compile_source:
# Clean existing compile & install directories from previous builds
if generate_source:
self.clean_compile_dir(compile_dir,
purge=(build_mode == 'purge'))
self.configure_build_files(
name=name, src_dir=src_dir, compile_dir=compile_dir,
install_dir=install_dir, **kwargs)
self.clean_install_dir(install_dir)
self.compile_source_files(compile_dir, name)
# Switch back to original dir
os.chdir(orig_dir)
# Cache any dimension maps that were calculated during the generation
# process
return install_dir
def get_build_dir(self, name, url):
return os.path.join(self.base_dir, self.url_build_path(url), name)
def get_source_dir(self, name, url):
return os.path.abspath(os.path.join(
self.get_build_dir(name, url), self._SRC_DIR))
def get_compile_dir(self, name, url):
return os.path.abspath(os.path.join(
self.get_build_dir(name, url), self._CMPL_DIR))
def get_install_dir(self, name, url):
return os.path.abspath(os.path.join(
self.get_build_dir(name, url), self._INSTL_DIR))
def clean_src_dir(self, src_dir, component_name): # @UnusedVariable
# Clean existing src directories from previous builds.
shutil.rmtree(src_dir, ignore_errors=True)
try:
os.makedirs(src_dir)
except OSError as e:
raise Pype9BuildError(
"Could not create source directory ({}), please check the "
"required permissions or specify a different \"build dir"
"base\" ('build_dir_base'):\n{}".format(src_dir, e))
def clean_compile_dir(self, compile_dir, purge=False): # @UnusedVariable
# Clean existing compile & install directories from previous builds
shutil.rmtree(compile_dir, ignore_errors=True)
try:
os.makedirs(compile_dir)
except OSError as e:
raise Pype9BuildError(
"Could not create compile directory ({}), please check the "
"required permissions or specify a different \"build dir"
"base\" ('build_dir_base'):\n{}".format(compile_dir, e))
def clean_install_dir(self, install_dir):
# Clean existing compile & install directories from previous builds
shutil.rmtree(install_dir, ignore_errors=True)
try:
os.makedirs(install_dir)
except OSError as e:
raise Pype9BuildError(
"Could not create install directory ({}), please check the "
"required permissions or specify a different \"build dir"
"base\" ('build_dir_base'):\n{}".format(install_dir, e))
def render_to_file(self, template, args, filename, directory, switches={},
post_hoc_subs={}):
# Initialise the template loader to include the flag directories
template_paths = [
self.BASE_TMPL_PATH,
os.path.join(self.BASE_TMPL_PATH, 'includes')]
# Add include paths for various switches (e.g. solver type)
for name, value in list(switches.items()):
if value is not None:
template_paths.append(os.path.join(self.BASE_TMPL_PATH,
'includes', name, value))
# Add default path for template includes
template_paths.append(
os.path.join(self.BASE_TMPL_PATH, 'includes', 'default'))
# Initialise the Jinja2 environment
jinja_env = Environment(loader=FileSystemLoader(template_paths),
trim_blocks=True, lstrip_blocks=True,
undefined=StrictUndefined)
# Add some globals used by the template code
jinja_env.globals.update(**self._globals)
# Actually render the contents
contents = jinja_env.get_template(template).render(**args)
for old, new in list(post_hoc_subs.items()):
contents = contents.replace(old, new)
# Write the contents to file
with open(os.path.join(directory, filename), 'w') as f:
f.write(contents)
def path_to_utility(self, utility_name, env_var='', **kwargs): # @UnusedVariable @IgnorePep8
"""
Returns the full path to an executable by searching the "PATH"
environment variable
Parameters
----------
utility_name : str
Name of executable to search the execution path
env_var : str
Name of a environment variable to lookup first before searching
path
default : str | None
The default value to assign to the path if it cannot be found.
Returns
-------
utility_path : str
Full path to executable
"""
if kwargs and list(kwargs) != ['default']:
raise Pype9RuntimeError(
"Should only provide 'default' as kwarg to path_to_utility "
"provided ({})".format(kwargs))
try:
utility_path = os.environ[env_var]
except KeyError:
if platform.system() == 'Windows':
utility_name += '.exe'
# Get the system path
system_path = os.environ['PATH'].split(os.pathsep)
# Append NEST_INSTALL_DIR/NRNHOME if present
system_path.extend(self.simulator_specific_paths())
# Check the system path for the command
utility_path = None
for dr in system_path:
path = join(dr, utility_name)
if os.path.exists(path):
utility_path = path
break
if not utility_path:
try:
utility_path = kwargs['default']
except KeyError:
raise Pype9CommandNotFoundError(
"Could not find executable '{}' on the system path "
"'{}'".format(utility_name, ':'.join(system_path)))
else:
if not os.path.exists(utility_path):
raise Pype9CommandNotFoundError(
"Could not find executable '{}' at path '{}' provided by "
"'{}' environment variable"
.format(utility_name, utility_path, env_var))
return utility_path
def simulator_specific_paths(self):
"""
To be overridden by derived classes if required.
"""
return []
def transform_for_build(self, name, component_class, **kwargs): # @UnusedVariable @IgnorePep8
"""
Copies and transforms the component class to match the format of the
simulator (overridden in derived class)
Parameters
----------
name : str
The name of the transformed component class
component_class : nineml.Dynamics
The component class to be transformed
"""
# ---------------------------------------------------------------------
# Clone original component class and properties
# ---------------------------------------------------------------------
component_class = component_class.clone()
component_class.name = name
self._set_build_props(component_class, **kwargs)
return component_class
def _set_build_props(self, component_class, **build_props):
"""
Sets the build properties in the component class annotations
Parameters
----------
component_class : Dynamics | MultiDynamics
The build component class
build_props : dict(str, str)
Build properties to save into the annotations of the build
component class
"""
for k, v in list(build_props.items()) + [
('version', pype9.__version__)]:
component_class.annotations.set((BUILD_PROPS, PYPE9_NS), k, v)
def run_command(self, cmd, fail_msg=None, **kwargs):
env = os.environ.copy()
try:
process = sp.Popen(cmd, stdout=sp.PIPE,
stderr=sp.PIPE, env=env, **kwargs)
stdout, stderr = process.communicate()
if PY3:
stdout = str(stdout.decode('utf-8'))
stderr = str(stderr.decode('utf-8'))
logger.debug("'{}' stdout:\n{}".format(cmd, stdout))
logger.debug("'{}' stderr:\n{}".format(cmd, stderr))
except sp.CalledProcessError as e:
if fail_msg is None:
raise
else:
msg = fail_msg.format(e)
raise Pype9BuildError(msg)
return stdout, stderr
@classmethod
def get_mod_time(cls, url):
if url is None:
mod_time = time.ctime(0) # Return the earliest date if no url
else:
mod_time = time.ctime(os.path.getmtime(url))
return mod_time
@classmethod
def url_build_path(cls, url):
if url is None:
path = 'generated'
else:
if url_re.match(url) is not None:
path = os.path.join(
'url',
re.match(r'(?:\w+://)?([\.\/\w]+).*', url).group(1))
else:
path = os.path.join('file', os.path.realpath(url)[1:])
return path
def load_libraries(self, name, url, **kwargs):
"""
To be overridden by derived classes to allow the model to be loaded
from compiled external libraries
"""
pass
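# --- Usage sketch (editorial addition, not part of the original module) ---
# The generate() workflow above needs a concrete subclass that fills in the
# abstract generate_source_files/compile_source_files hooks. The subclass below
# is a hypothetical illustration of that contract: its name, template file and
# make invocation are assumptions, not pype9 API; 'BaseCodeGenerator' stands in
# for the base class defined in this module.
#
# class ExampleCodeGenerator(BaseCodeGenerator):
#     SIMULATOR_NAME = 'examplesim'
#     SIMULATOR_VERSION = '1.0'
#     BASE_TMPL_PATH = '/path/to/templates'
#
#     def generate_source_files(self, component_class, src_dir, name, **kwargs):
#         # Render a single template into the source directory
#         self.render_to_file('model.tmpl', {'component_class': component_class},
#                             name + '.mod', src_dir)
#
#     def compile_source_files(self, compile_dir, name):
#         self.run_command(['make', '-C', compile_dir],
#                          fail_msg='Compilation failed: {}')
#
# code_gen = ExampleCodeGenerator(base_dir='/tmp/pype9_build')
# install_dir = code_gen.generate(component_class, build_mode='lazy')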
|
jmartu/testing
|
refs/heads/master
|
venv/lib/python3.6/site-packages/pip/_vendor/requests/status_codes.py
|
481
|
# -*- coding: utf-8 -*-
from .structures import LookupDict
_codes = {
# Informational.
100: ('continue',),
101: ('switching_protocols',),
102: ('processing',),
103: ('checkpoint',),
122: ('uri_too_long', 'request_uri_too_long'),
200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
201: ('created',),
202: ('accepted',),
203: ('non_authoritative_info', 'non_authoritative_information'),
204: ('no_content',),
205: ('reset_content', 'reset'),
206: ('partial_content', 'partial'),
207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
208: ('already_reported',),
226: ('im_used',),
# Redirection.
300: ('multiple_choices',),
301: ('moved_permanently', 'moved', '\\o-'),
302: ('found',),
303: ('see_other', 'other'),
304: ('not_modified',),
305: ('use_proxy',),
306: ('switch_proxy',),
307: ('temporary_redirect', 'temporary_moved', 'temporary'),
308: ('permanent_redirect',
'resume_incomplete', 'resume',), # These 2 to be removed in 3.0
# Client Error.
400: ('bad_request', 'bad'),
401: ('unauthorized',),
402: ('payment_required', 'payment'),
403: ('forbidden',),
404: ('not_found', '-o-'),
405: ('method_not_allowed', 'not_allowed'),
406: ('not_acceptable',),
407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
408: ('request_timeout', 'timeout'),
409: ('conflict',),
410: ('gone',),
411: ('length_required',),
412: ('precondition_failed', 'precondition'),
413: ('request_entity_too_large',),
414: ('request_uri_too_large',),
415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
417: ('expectation_failed',),
418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
421: ('misdirected_request',),
422: ('unprocessable_entity', 'unprocessable'),
423: ('locked',),
424: ('failed_dependency', 'dependency'),
425: ('unordered_collection', 'unordered'),
426: ('upgrade_required', 'upgrade'),
428: ('precondition_required', 'precondition'),
429: ('too_many_requests', 'too_many'),
431: ('header_fields_too_large', 'fields_too_large'),
444: ('no_response', 'none'),
449: ('retry_with', 'retry'),
450: ('blocked_by_windows_parental_controls', 'parental_controls'),
451: ('unavailable_for_legal_reasons', 'legal_reasons'),
499: ('client_closed_request',),
# Server Error.
500: ('internal_server_error', 'server_error', '/o\\', '✗'),
501: ('not_implemented',),
502: ('bad_gateway',),
503: ('service_unavailable', 'unavailable'),
504: ('gateway_timeout',),
505: ('http_version_not_supported', 'http_version'),
506: ('variant_also_negotiates',),
507: ('insufficient_storage',),
509: ('bandwidth_limit_exceeded', 'bandwidth'),
510: ('not_extended',),
511: ('network_authentication_required', 'network_auth', 'network_authentication'),
}
codes = LookupDict(name='status_codes')
for code, titles in _codes.items():
for title in titles:
setattr(codes, title, code)
if not title.startswith('\\'):
setattr(codes, title.upper(), code)
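# Example usage (editorial illustration, not part of the vendored module): the
# loop above exposes every alias as an attribute of ``codes``, so HTTP status
# checks can be written by name instead of by bare integer, e.g.
#
#     from requests.status_codes import codes
#     codes.ok          # 200
#     codes.NOT_FOUND   # 404 (upper-case aliases are added unless the name starts with '\')
#     codes.teapot      # 418
#     response.status_code == codes.ok   # typical check against a Response object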
|
cyisfor/Python-Markdown
|
refs/heads/master
|
markdown/inlinepatterns.py
|
65
|
"""
INLINE PATTERNS
=============================================================================
Inline patterns such as *emphasis* are handled by means of auxiliary
objects, one per pattern. Pattern objects must be instances of classes
that extend markdown.Pattern. Each pattern object uses a single regular
expression and needs to support the following methods:
pattern.getCompiledRegExp() # returns a regular expression
pattern.handleMatch(m) # takes a match object and returns
# an ElementTree element or just plain text
All of python markdown's built-in patterns subclass from Pattern,
but you can add additional patterns that don't.
Also note that all the regular expressions used by inline must
capture the whole block. For this reason, they all start with
'^(.*)' and end with '(.*)$'. For the built-in expressions,
Pattern takes care of adding the "^(.*)" and "(.*)$".
Finally, the order in which regular expressions are applied is very
important - e.g. if we first replace http://.../ links with <a> tags
and _then_ try to replace inline html, we would end up with a mess.
So, we apply the expressions in the following order:
* escape and backticks have to go before everything else, so
that we can preempt any markdown patterns by escaping them.
* then we handle auto-links (must be done before inline html)
* then we handle inline HTML. At this point we will simply
replace all inline HTML strings with a placeholder and add
the actual HTML to a hash.
* then inline images (must be done before links)
* then bracketed links, first regular then reference-style
* finally we apply strong and emphasis
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import util
from . import odict
import re
try: # pragma: no cover
from urllib.parse import urlparse, urlunparse
except ImportError: # pragma: no cover
from urlparse import urlparse, urlunparse
try: # pragma: no cover
from html import entities
except ImportError: # pragma: no cover
import htmlentitydefs as entities
def build_inlinepatterns(md_instance, **kwargs):
""" Build the default set of inline patterns for Markdown. """
inlinePatterns = odict.OrderedDict()
inlinePatterns["backtick"] = BacktickPattern(BACKTICK_RE)
inlinePatterns["escape"] = EscapePattern(ESCAPE_RE, md_instance)
inlinePatterns["reference"] = ReferencePattern(REFERENCE_RE, md_instance)
inlinePatterns["link"] = LinkPattern(LINK_RE, md_instance)
inlinePatterns["image_link"] = ImagePattern(IMAGE_LINK_RE, md_instance)
inlinePatterns["image_reference"] = ImageReferencePattern(
IMAGE_REFERENCE_RE, md_instance
)
inlinePatterns["short_reference"] = ReferencePattern(
SHORT_REF_RE, md_instance
)
inlinePatterns["autolink"] = AutolinkPattern(AUTOLINK_RE, md_instance)
inlinePatterns["automail"] = AutomailPattern(AUTOMAIL_RE, md_instance)
inlinePatterns["linebreak"] = SubstituteTagPattern(LINE_BREAK_RE, 'br')
if md_instance.safeMode != 'escape':
inlinePatterns["html"] = HtmlPattern(HTML_RE, md_instance)
inlinePatterns["entity"] = HtmlPattern(ENTITY_RE, md_instance)
inlinePatterns["not_strong"] = SimpleTextPattern(NOT_STRONG_RE)
inlinePatterns["em_strong"] = DoubleTagPattern(EM_STRONG_RE, 'strong,em')
inlinePatterns["strong_em"] = DoubleTagPattern(STRONG_EM_RE, 'em,strong')
inlinePatterns["strong"] = SimpleTagPattern(STRONG_RE, 'strong')
inlinePatterns["emphasis"] = SimpleTagPattern(EMPHASIS_RE, 'em')
if md_instance.smart_emphasis:
inlinePatterns["emphasis2"] = SimpleTagPattern(SMART_EMPHASIS_RE, 'em')
else:
inlinePatterns["emphasis2"] = SimpleTagPattern(EMPHASIS_2_RE, 'em')
return inlinePatterns
"""
The actual regular expressions for patterns
-----------------------------------------------------------------------------
"""
NOBRACKET = r'[^\]\[]*'
BRK = (
r'\[(' +
(NOBRACKET + r'(\[')*6 +
(NOBRACKET + r'\])*')*6 +
NOBRACKET + r')\]'
)
NOIMG = r'(?<!\!)'
# `e=f()` or ``e=f("`")``
BACKTICK_RE = r'(?<!\\)(`+)(.+?)(?<!`)\2(?!`)'
# \<
ESCAPE_RE = r'\\(.)'
# *emphasis*
EMPHASIS_RE = r'(\*)([^\*]+)\2'
# **strong**
STRONG_RE = r'(\*{2}|_{2})(.+?)\2'
# ***strongem*** or ***em*strong**
EM_STRONG_RE = r'(\*|_)\2{2}(.+?)\2(.*?)\2{2}'
# ***strong**em*
STRONG_EM_RE = r'(\*|_)\2{2}(.+?)\2{2}(.*?)\2'
# _smart_emphasis_
SMART_EMPHASIS_RE = r'(?<!\w)(_)(?!_)(.+?)(?<!_)\2(?!\w)'
# _emphasis_
EMPHASIS_2_RE = r'(_)(.+?)\2'
# [text](url) or [text](<url>) or [text](url "title")
LINK_RE = NOIMG + BRK + \
r'''\(\s*(<.*?>|((?:(?:\(.*?\))|[^\(\)]))*?)\s*((['"])(.*?)\12\s*)?\)'''
#  or 
IMAGE_LINK_RE = r'\!' + BRK + r'\s*\((<.*?>|([^")]+"[^"]*"|[^\)]*))\)'
# [Google][3]
REFERENCE_RE = NOIMG + BRK + r'\s?\[([^\]]*)\]'
# [Google]
SHORT_REF_RE = NOIMG + r'\[([^\]]+)\]'
# ![alt text][2]
IMAGE_REFERENCE_RE = r'\!' + BRK + r'\s?\[([^\]]*)\]'
# stand-alone * or _
NOT_STRONG_RE = r'((^| )(\*|_)( |$))'
# <http://www.123.com>
AUTOLINK_RE = r'<((?:[Ff]|[Hh][Tt])[Tt][Pp][Ss]?://[^>]*)>'
# <me@example.com>
AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>'
# <...>
HTML_RE = r'(\<([a-zA-Z/][^\>]*?|\!--.*?--)\>)'
# &
ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)'
# two spaces at end of line
LINE_BREAK_RE = r' \n'
def dequote(string):
"""Remove quotes from around a string."""
if ((string.startswith('"') and string.endswith('"')) or
(string.startswith("'") and string.endswith("'"))):
return string[1:-1]
else:
return string
ATTR_RE = re.compile(r"\{@([^\}]*)=([^\}]*)}") # {@id=123}
def handleAttributes(text, parent):
"""Set values of an element based on attribute definitions ({@id=123})."""
def attributeCallback(match):
parent.set(match.group(1), match.group(2).replace('\n', ' '))
return ATTR_RE.sub(attributeCallback, text)
"""
The pattern classes
-----------------------------------------------------------------------------
"""
class Pattern(object):
"""Base class that inline patterns subclass. """
def __init__(self, pattern, markdown_instance=None):
"""
Create an instance of an inline pattern.
Keyword arguments:
* pattern: A regular expression that matches a pattern
"""
self.pattern = pattern
self.compiled_re = re.compile("^(.*?)%s(.*?)$" % pattern,
re.DOTALL | re.UNICODE)
# Api for Markdown to pass safe_mode into instance
self.safe_mode = False
if markdown_instance:
self.markdown = markdown_instance
def getCompiledRegExp(self):
""" Return a compiled regular expression. """
return self.compiled_re
def handleMatch(self, m):
"""Return a ElementTree element from the given match.
Subclasses should override this method.
Keyword arguments:
* m: A re match object containing a match of the pattern.
"""
pass # pragma: no cover
def type(self):
""" Return class name, to define pattern type """
return self.__class__.__name__
def unescape(self, text):
""" Return unescaped text given text with an inline placeholder. """
try:
stash = self.markdown.treeprocessors['inline'].stashed_nodes
except KeyError: # pragma: no cover
return text
def itertext(el): # pragma: no cover
' Reimplement Element.itertext for older python versions '
tag = el.tag
if not isinstance(tag, util.string_type) and tag is not None:
return
if el.text:
yield el.text
for e in el:
for s in itertext(e):
yield s
if e.tail:
yield e.tail
def get_stash(m):
id = m.group(1)
if id in stash:
value = stash.get(id)
if isinstance(value, util.string_type):
return value
else:
# An etree Element - return text content only
return ''.join(itertext(value))
return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
class SimpleTextPattern(Pattern):
""" Return a simple text of group(2) of a Pattern. """
def handleMatch(self, m):
return m.group(2)
class EscapePattern(Pattern):
""" Return an escaped character. """
def handleMatch(self, m):
char = m.group(2)
if char in self.markdown.ESCAPED_CHARS:
return '%s%s%s' % (util.STX, ord(char), util.ETX)
else:
return None
class SimpleTagPattern(Pattern):
"""
Return element of type `tag` with a text attribute of group(3)
of a Pattern.
"""
def __init__(self, pattern, tag):
Pattern.__init__(self, pattern)
self.tag = tag
def handleMatch(self, m):
el = util.etree.Element(self.tag)
el.text = m.group(3)
return el
class SubstituteTagPattern(SimpleTagPattern):
""" Return an element of type `tag` with no children. """
def handleMatch(self, m):
return util.etree.Element(self.tag)
class BacktickPattern(Pattern):
""" Return a `<code>` element containing the matching text. """
def __init__(self, pattern):
Pattern.__init__(self, pattern)
self.tag = "code"
def handleMatch(self, m):
el = util.etree.Element(self.tag)
el.text = util.AtomicString(m.group(3).strip())
return el
class DoubleTagPattern(SimpleTagPattern):
"""Return a ElementTree element nested in tag2 nested in tag1.
Useful for strong emphasis etc.
"""
def handleMatch(self, m):
tag1, tag2 = self.tag.split(",")
el1 = util.etree.Element(tag1)
el2 = util.etree.SubElement(el1, tag2)
el2.text = m.group(3)
if len(m.groups()) == 5:
el2.tail = m.group(4)
return el1
class HtmlPattern(Pattern):
""" Store raw inline html and return a placeholder. """
def handleMatch(self, m):
rawhtml = self.unescape(m.group(2))
place_holder = self.markdown.htmlStash.store(rawhtml)
return place_holder
def unescape(self, text):
""" Return unescaped text given text with an inline placeholder. """
try:
stash = self.markdown.treeprocessors['inline'].stashed_nodes
except KeyError: # pragma: no cover
return text
def get_stash(m):
id = m.group(1)
value = stash.get(id)
if value is not None:
try:
return self.markdown.serializer(value)
except Exception:
return '\%s' % value
return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
class LinkPattern(Pattern):
""" Return a link element from the given match. """
def handleMatch(self, m):
el = util.etree.Element("a")
el.text = m.group(2)
title = m.group(13)
href = m.group(9)
if href:
if href[0] == "<":
href = href[1:-1]
el.set("href", self.sanitize_url(self.unescape(href.strip())))
else:
el.set("href", "")
if title:
title = dequote(self.unescape(title))
el.set("title", title)
return el
def sanitize_url(self, url):
"""
Sanitize a url against xss attacks in "safe_mode".
Rather than specifically blacklisting `javascript:alert("XSS")` and all
its aliases (see <http://ha.ckers.org/xss.html>), we whitelist known
safe url formats. Most urls contain a network location, however some
are known not to (i.e.: mailto links). Script urls do not contain a
location. Additionally, for `javascript:...`, the scheme would be
"javascript" but some aliases will appear to `urlparse()` to have no
scheme. On top of that relative links (i.e.: "foo/bar.html") have no
scheme. Therefore we must check "path", "parameters", "query" and
"fragment" for any literal colons. We don't check "scheme" for colons
because it *should* never have any and "netloc" must allow the form:
`username:password@host:port`.
"""
if not self.markdown.safeMode:
# Return immediately, bypassing parsing.
return url
try:
scheme, netloc, path, params, query, fragment = url = urlparse(url)
except ValueError: # pragma: no cover
# Bad url - so bad it couldn't be parsed.
return ''
locless_schemes = ['', 'mailto', 'news']
allowed_schemes = locless_schemes + ['http', 'https', 'ftp', 'ftps']
if scheme not in allowed_schemes:
# Not a known (allowed) scheme. Not safe.
return ''
if netloc == '' and scheme not in locless_schemes: # pragma: no cover
# This should not happen. Treat as suspect.
return ''
for part in url[2:]:
if ":" in part:
# A colon in "path", "parameters", "query"
# or "fragment" is suspect.
return ''
# Url passes all tests. Return url as-is.
return urlunparse(url)
class ImagePattern(LinkPattern):
""" Return a img element from the given match. """
def handleMatch(self, m):
el = util.etree.Element("img")
src_parts = m.group(9).split()
if src_parts:
src = src_parts[0]
if src[0] == "<" and src[-1] == ">":
src = src[1:-1]
el.set('src', self.sanitize_url(self.unescape(src)))
else:
el.set('src', "")
if len(src_parts) > 1:
el.set('title', dequote(self.unescape(" ".join(src_parts[1:]))))
if self.markdown.enable_attributes:
truealt = handleAttributes(m.group(2), el)
else:
truealt = m.group(2)
el.set('alt', self.unescape(truealt))
return el
class ReferencePattern(LinkPattern):
""" Match to a stored reference and return link element. """
NEWLINE_CLEANUP_RE = re.compile(r'[ ]?\n', re.MULTILINE)
def handleMatch(self, m):
try:
id = m.group(9).lower()
except IndexError:
id = None
if not id:
# if we got something like "[Google][]" or "[Goggle]"
# we'll use "google" as the id
id = m.group(2).lower()
# Clean up linebreaks in id
id = self.NEWLINE_CLEANUP_RE.sub(' ', id)
if id not in self.markdown.references: # ignore undefined refs
return None
href, title = self.markdown.references[id]
text = m.group(2)
return self.makeTag(href, title, text)
def makeTag(self, href, title, text):
el = util.etree.Element('a')
el.set('href', self.sanitize_url(href))
if title:
el.set('title', title)
el.text = text
return el
class ImageReferencePattern(ReferencePattern):
""" Match to a stored reference and return img element. """
def makeTag(self, href, title, text):
el = util.etree.Element("img")
el.set("src", self.sanitize_url(href))
if title:
el.set("title", title)
if self.markdown.enable_attributes:
text = handleAttributes(text, el)
el.set("alt", self.unescape(text))
return el
class AutolinkPattern(Pattern):
""" Return a link Element given an autolink (`<http://example/com>`). """
def handleMatch(self, m):
el = util.etree.Element("a")
el.set('href', self.unescape(m.group(2)))
el.text = util.AtomicString(m.group(2))
return el
class AutomailPattern(Pattern):
"""
Return a mailto link Element given an automail link (`<foo@example.com>`).
"""
def handleMatch(self, m):
el = util.etree.Element('a')
email = self.unescape(m.group(2))
if email.startswith("mailto:"):
email = email[len("mailto:"):]
def codepoint2name(code):
"""Return entity definition by code, or the code if not defined."""
entity = entities.codepoint2name.get(code)
if entity:
return "%s%s;" % (util.AMP_SUBSTITUTE, entity)
else:
return "%s#%d;" % (util.AMP_SUBSTITUTE, code)
letters = [codepoint2name(ord(letter)) for letter in email]
el.text = util.AtomicString(''.join(letters))
mailto = "mailto:" + email
mailto = "".join([util.AMP_SUBSTITUTE + '#%d;' %
ord(letter) for letter in mailto])
el.set('href', mailto)
return el
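# --- Editorial usage sketch (not part of the original module) ---
# The module docstring above explains that each inline pattern pairs a regular
# expression with a Pattern subclass. A third-party pattern is typically wired
# in through an Extension that inserts it into ``md.inlinePatterns`` (the
# OrderedDict built by ``build_inlinepatterns``). The ``--deleted--`` syntax and
# the ``DelExtension`` name below are assumptions made up for this example.
#
# import markdown
# from markdown.extensions import Extension
# from markdown.inlinepatterns import SimpleTagPattern
#
# DEL_RE = r'(--)(.+?)--'   # --deleted text--
#
# class DelExtension(Extension):
#     def extendMarkdown(self, md, md_globals):
#         # group(3) of the wrapped match becomes the text of the <del> element
#         md.inlinePatterns.add('del', SimpleTagPattern(DEL_RE, 'del'),
#                               '<not_strong')
#
# html = markdown.markdown('water is --wet-- dry', extensions=[DelExtension()])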
|
waxmanr/moose
|
refs/heads/devel
|
framework/contrib/nsiqcppstyle/rules/RULE_5_2_C_provide_doxygen_class_comment_on_class_def.py
|
43
|
"""
Provide the class doxygen comment.
It checks if there is a doxygen-style comment in front of each class definition.
== Violation ==
class A { <== Violation. No doxygen comment.
};
/* <== Violation. It's not a doxygen comment
*
*/
class B {
};
== Good ==
/**
* blar blar
*/
class A { <== OK
};
class B; <== Don't care. It's forward decl.
"""
from nsiqcppstyle_rulehelper import *
from nsiqcppstyle_reporter import *
from nsiqcppstyle_rulemanager import *
def RunRule(lexer, currentType, fullName, decl, contextStack, typeContext) :
if not decl and currentType == "CLASS" and typeContext != None:
t = lexer.GetCurToken()
lexer.PushTokenIndex()
t2 = lexer.GetPrevTokenInType("COMMENT")
lexer.PopTokenIndex()
lexer.PushTokenIndex()
t3 = lexer.GetPrevTokenInTypeList(["LBRACE", "SEMI", "PREPROCESSOR"], False, True)
lexer.PopTokenIndex()
if t2 != None and t2.additional == "DOXYGEN" :
if t3 == None or t2.lexpos > t3.lexpos :
return
nsiqcppstyle_reporter.Error(t, __name__, "Doxygen Comment should be provided in front of class def(%s)." % fullName)
ruleManager.AddTypeNameRule(RunRule)
###########################################################################################
# Unit Test
###########################################################################################
from nsiqunittest.nsiqcppstyle_unittestbase import *
class testRule(nct):
def setUpRule(self):
ruleManager.AddTypeNameRule(RunRule)
def test1(self):
self.Analyze("thisfile.c",
"""
class A {
}
""")
assert CheckErrorContent(__name__)
def test2(self):
self.Analyze("thisfile.c",
"""
/*
*/
class K {
}
""")
assert CheckErrorContent(__name__)
def test3(self):
self.Analyze("thisfile.c",
"""
/**
*/
class K {
class T {
}
}
""")
assert CheckErrorContent(__name__)
assert CheckErrorContent(__name__)
def test4(self):
self.Analyze("thisfile.c",
"""
/**
*
*/
class J {
int k;
/**
*/
class T {
}
}
class T;
""")
assert not CheckErrorContent(__name__)
def test5(self):
self.Analyze("thisfile.c",
"""
/*
*/
struct K {
}
""")
assert not CheckErrorContent(__name__)
def test6(self):
self.Analyze("thisfile.c",
"""
/**
*/
template<class A, class B>
class K {
}
""")
assert not CheckErrorContent(__name__)
|
optima-ict/odoo
|
refs/heads/9.0
|
addons/hr/res_users.py
|
46
|
from openerp import api
from openerp.osv import fields, osv
class res_users(osv.Model):
""" Update of res.users class
- add field for the related employee of the user
- if adding groups to a user, check if base.group_user is in it (member of
'Employee'); if so, create an employee form linked to it. """
_name = 'res.users'
_inherit = ['res.users']
_columns = {
'employee_ids': fields.one2many('hr.employee', 'user_id', 'Related employees'),
}
def _message_post_get_eid(self, cr, uid, thread_id, context=None):
assert thread_id, "res.users does not support posting global messages"
if context and 'thread_model' in context:
context = dict(context or {})
context['thread_model'] = 'hr.employee'
if isinstance(thread_id, (list, tuple)):
thread_id = thread_id[0]
return self.pool.get('hr.employee').search(cr, uid, [('user_id', '=', thread_id)], context=context)
@api.cr_uid_ids_context
def message_post(self, cr, uid, thread_id, context=None, **kwargs):
""" Redirect the posting of message on res.users to the related employee.
This is done because when giving the context of Chatter on the
various mailboxes, we do not have access to the current partner_id. """
if kwargs.get('message_type') == 'email':
return super(res_users, self).message_post(cr, uid, thread_id, context=context, **kwargs)
res = None
employee_ids = self._message_post_get_eid(cr, uid, thread_id, context=context)
if not employee_ids: # no employee: fall back on previous behavior
return super(res_users, self).message_post(cr, uid, thread_id, context=context, **kwargs)
for employee_id in employee_ids:
res = self.pool.get('hr.employee').message_post(cr, uid, employee_id, context=context, **kwargs)
return res
|
joshkh/intermine
|
refs/heads/beta
|
bio/scripts/humanmine/gen_ncbi_linkouts.py
|
12
|
#!/usr/bin/env python
# This is an automatically generated script to run your query
# to use it you will require the intermine python client.
# To install the client, run the following command from a terminal:
#
# sudo easy_install intermine
#
# For further documentation you can visit:
# http://intermine.readthedocs.org/en/latest/web-services/
# The following two lines will be needed in every python script:
from intermine.webservice import Service
service = Service("http://humanmine.org/humanmine/service")
# Get a new query on the class (table) you will be querying:
query = service.new_query("Gene")
# The view specifies the output columns
query.add_view("primaryIdentifier", "crossReferences.identifier")
# Uncomment and edit the line below (the default) to select a custom sort order:
# query.add_sort_order("Gene.primaryIdentifier", "ASC")
# You can edit the constraint values below
query.add_constraint("crossReferences.source.name", "=", "NCBI", code = "A")
query.add_constraint("crossReferences.identifier", "!=", "Entrez*", code = "B")
query.add_constraint("organism.name", "=", "Homo sapiens", code = "C")
query.add_constraint("primaryIdentifier", "CONTAINS", "ENSG", code = "D")
############################################################################
prefix = "prid: 9169\n"
prefix = prefix + "dbase: gene\n"
prefix = prefix + "!base: http://www.humanmine.org/humanmine/portal.do?class=Gene&externalids=\n"
############################################################################
f = open('resources.ft','w')
f.write(prefix)
for row in query.rows():
f.write("-------------------------------------------------------------------\n")
f.write("linkid: " + row["primaryIdentifier"] + "\n")
f.write("uids: " + row["crossReferences.identifier"] + "\n")
f.write("base: &base.url;\n")
f.write("rule: " + row["primaryIdentifier"] + "\n")
f.close()
|
vimalkvn/riboseqr_wrapper
|
refs/heads/master
|
riboseqr/__init__.py
|
1349
|
# -*- coding: utf-8 -*-
|
HubSpot/vitess
|
refs/heads/hubspot-client
|
py/util/static_auth_client.py
|
8
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
class StaticAuthClientCreds():
"""Metadata wrapper for StaticAuthClientCreds."""
def __init__(self, auth_static_client_creds):
self._credentials = auth_static_client_creds
with open(self._credentials) as data_file:
self._data = json.load(data_file)
def metadata(self):
return (('username', self._data['Username']), ('password', self._data['Password']),)
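# Illustrative usage (editorial addition, not part of the original file): the
# wrapped JSON file is expected to provide at least 'Username' and 'Password'
# keys, e.g. a file containing {"Username": "vt_app", "Password": "secret"}.
#
#     creds = StaticAuthClientCreds('/path/to/static_auth.json')
#     creds.metadata()   # (('username', 'vt_app'), ('password', 'secret'))
#
# metadata() returns (key, value) pairs suitable for attaching as per-call
# metadata; the file name and values above are made up for the example.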
|
sigma-random/scrapy
|
refs/heads/master
|
tests/test_utils_signal.py
|
15
|
from twisted.trial import unittest
from twisted.python import log as txlog
from twisted.python.failure import Failure
from twisted.internet import defer, reactor
from scrapy.xlib.pydispatch import dispatcher
from scrapy.utils.signal import send_catch_log, send_catch_log_deferred
from scrapy import log
class SendCatchLogTest(unittest.TestCase):
@defer.inlineCallbacks
def test_send_catch_log(self):
test_signal = object()
handlers_called = set()
def log_received(event):
handlers_called.add(log_received)
assert "error_handler" in event['message'][0]
assert event['logLevel'] == log.ERROR
txlog.addObserver(log_received)
dispatcher.connect(self.error_handler, signal=test_signal)
dispatcher.connect(self.ok_handler, signal=test_signal)
result = yield defer.maybeDeferred(self._get_result, test_signal, arg='test', \
handlers_called=handlers_called)
assert self.error_handler in handlers_called
assert self.ok_handler in handlers_called
assert log_received in handlers_called
self.assertEqual(result[0][0], self.error_handler)
self.assert_(isinstance(result[0][1], Failure))
self.assertEqual(result[1], (self.ok_handler, "OK"))
txlog.removeObserver(log_received)
self.flushLoggedErrors()
dispatcher.disconnect(self.error_handler, signal=test_signal)
dispatcher.disconnect(self.ok_handler, signal=test_signal)
def _get_result(self, signal, *a, **kw):
return send_catch_log(signal, *a, **kw)
def error_handler(self, arg, handlers_called):
handlers_called.add(self.error_handler)
a = 1/0
def ok_handler(self, arg, handlers_called):
handlers_called.add(self.ok_handler)
assert arg == 'test'
return "OK"
class SendCatchLogDeferredTest(SendCatchLogTest):
def _get_result(self, signal, *a, **kw):
return send_catch_log_deferred(signal, *a, **kw)
class SendCatchLogDeferredTest2(SendCatchLogTest):
def ok_handler(self, arg, handlers_called):
handlers_called.add(self.ok_handler)
assert arg == 'test'
d = defer.Deferred()
reactor.callLater(0, d.callback, "OK")
return d
def _get_result(self, signal, *a, **kw):
return send_catch_log_deferred(signal, *a, **kw)
class SendCatchLogTest2(unittest.TestCase):
def test_error_logged_if_deferred_not_supported(self):
test_signal = object()
test_handler = lambda: defer.Deferred()
log_events = []
txlog.addObserver(log_events.append)
dispatcher.connect(test_handler, test_signal)
send_catch_log(test_signal)
self.assertTrue(log_events)
self.assertIn("Cannot return deferreds from signal handler", str(log_events))
txlog.removeObserver(log_events.append)
self.flushLoggedErrors()
dispatcher.disconnect(test_handler, test_signal)
|
zx8/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/nowtv.py
|
9
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
int_or_none,
parse_iso8601,
parse_duration,
remove_start,
)
class NowTVIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?nowtv\.de/(?P<station>rtl|rtl2|rtlnitro|superrtl|ntv|vox)/(?P<id>.+?)/player'
_TESTS = [{
# rtl
'url': 'http://www.nowtv.de/rtl/bauer-sucht-frau/die-neuen-bauern-und-eine-hochzeit/player',
'info_dict': {
'id': '203519',
'display_id': 'bauer-sucht-frau/die-neuen-bauern-und-eine-hochzeit',
'ext': 'mp4',
'title': 'Die neuen Bauern und eine Hochzeit',
'description': 'md5:e234e1ed6d63cf06be5c070442612e7e',
'thumbnail': 're:^https?://.*\.jpg$',
'timestamp': 1432580700,
'upload_date': '20150525',
'duration': 2786,
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
# rtl2
'url': 'http://www.nowtv.de/rtl2/berlin-tag-nacht/berlin-tag-nacht-folge-934/player',
'info_dict': {
'id': '203481',
'display_id': 'berlin-tag-nacht/berlin-tag-nacht-folge-934',
'ext': 'mp4',
'title': 'Berlin - Tag & Nacht (Folge 934)',
'description': 'md5:c85e88c2e36c552dfe63433bc9506dd0',
'thumbnail': 're:^https?://.*\.jpg$',
'timestamp': 1432666800,
'upload_date': '20150526',
'duration': 2641,
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
# rtlnitro
'url': 'http://www.nowtv.de/rtlnitro/alarm-fuer-cobra-11-die-autobahnpolizei/hals-und-beinbruch-2014-08-23-21-10-00/player',
'info_dict': {
'id': '165780',
'display_id': 'alarm-fuer-cobra-11-die-autobahnpolizei/hals-und-beinbruch-2014-08-23-21-10-00',
'ext': 'mp4',
'title': 'Hals- und Beinbruch',
'description': 'md5:b50d248efffe244e6f56737f0911ca57',
'thumbnail': 're:^https?://.*\.jpg$',
'timestamp': 1432415400,
'upload_date': '20150523',
'duration': 2742,
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
# superrtl
'url': 'http://www.nowtv.de/superrtl/medicopter-117/angst/player',
'info_dict': {
'id': '99205',
'display_id': 'medicopter-117/angst',
'ext': 'mp4',
'title': 'Angst!',
'description': 'md5:30cbc4c0b73ec98bcd73c9f2a8c17c4e',
'thumbnail': 're:^https?://.*\.jpg$',
'timestamp': 1222632900,
'upload_date': '20080928',
'duration': 3025,
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
# ntv
'url': 'http://www.nowtv.de/ntv/ratgeber-geld/thema-ua-der-erste-blick-die-apple-watch/player',
'info_dict': {
'id': '203521',
'display_id': 'ratgeber-geld/thema-ua-der-erste-blick-die-apple-watch',
'ext': 'mp4',
'title': 'Thema u.a.: Der erste Blick: Die Apple Watch',
'description': 'md5:4312b6c9d839ffe7d8caf03865a531af',
'thumbnail': 're:^https?://.*\.jpg$',
'timestamp': 1432751700,
'upload_date': '20150527',
'duration': 1083,
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
# vox
'url': 'http://www.nowtv.de/vox/der-hundeprofi/buero-fall-chihuahua-joel/player',
'info_dict': {
'id': '128953',
'display_id': 'der-hundeprofi/buero-fall-chihuahua-joel',
'ext': 'mp4',
'title': "Büro-Fall / Chihuahua 'Joel'",
'description': 'md5:e62cb6bf7c3cc669179d4f1eb279ad8d',
'thumbnail': 're:^https?://.*\.jpg$',
'timestamp': 1432408200,
'upload_date': '20150523',
'duration': 3092,
},
'params': {
# m3u8 download
'skip_download': True,
},
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('id')
station = mobj.group('station')
info = self._download_json(
'https://api.nowtv.de/v3/movies/%s?fields=*,format,files' % display_id,
display_id)
video_id = compat_str(info['id'])
files = info['files']
if not files:
if info.get('geoblocked', False):
raise ExtractorError(
'Video %s is not available from your location due to geo restriction' % video_id,
expected=True)
if not info.get('free', True):
raise ExtractorError(
'Video %s is not available for free' % video_id, expected=True)
f = info.get('format', {})
station = f.get('station') or station
STATIONS = {
'rtl': 'rtlnow',
'rtl2': 'rtl2now',
'vox': 'voxnow',
'nitro': 'rtlnitronow',
'ntv': 'n-tvnow',
'superrtl': 'superrtlnow'
}
formats = []
for item in files['items']:
item_path = remove_start(item['path'], '/')
tbr = int_or_none(item['bitrate'])
m3u8_url = 'http://hls.fra.%s.de/hls-vod-enc/%s.m3u8' % (STATIONS[station], item_path)
m3u8_url = m3u8_url.replace('now/', 'now/videos/')
formats.append({
'url': m3u8_url,
'format_id': '%s-%sk' % (item['id'], tbr),
'ext': 'mp4',
'tbr': tbr,
})
self._sort_formats(formats)
title = info['title']
description = info.get('articleLong') or info.get('articleShort')
timestamp = parse_iso8601(info.get('broadcastStartDate'), ' ')
duration = parse_duration(info.get('duration'))
thumbnail = f.get('defaultImage169Format') or f.get('defaultImage169Logo')
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'duration': duration,
'formats': formats,
}
|
kalxas/QGIS
|
refs/heads/master
|
python/plugins/processing/gui/BatchAlgorithmDialog.py
|
25
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
BatchAlgorithmDialog.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
from pprint import pformat
import time
from qgis.PyQt.QtWidgets import QPushButton, QDialogButtonBox
from qgis.PyQt.QtCore import Qt, QCoreApplication
from qgis.core import (QgsProcessingOutputHtml,
QgsProcessingOutputNumber,
QgsProcessingOutputString,
QgsProcessingOutputBoolean,
QgsProject,
QgsProcessingMultiStepFeedback,
QgsScopedProxyProgressTask,
QgsProcessingException)
from qgis.gui import QgsProcessingAlgorithmDialogBase
from qgis.utils import OverrideCursor, iface
from processing.gui.BatchPanel import BatchPanel
from processing.gui.AlgorithmExecutor import execute
from processing.gui.Postprocessing import handleAlgorithmResults
from processing.core.ProcessingResults import resultsList
from processing.tools.system import getTempFilename
from processing.tools import dataobjects
import codecs
class BatchFeedback(QgsProcessingMultiStepFeedback):
def __init__(self, steps, feedback):
super().__init__(steps, feedback)
self.errors = []
def reportError(self, error: str, fatalError: bool = False):
self.errors.append(error)
super().reportError(error, fatalError)
class BatchAlgorithmDialog(QgsProcessingAlgorithmDialogBase):
def __init__(self, alg, parent=None):
super().__init__(parent)
self.setAlgorithm(alg)
self.setWindowTitle(self.tr('Batch Processing - {0}').format(self.algorithm().displayName()))
self.setMainWidget(BatchPanel(self, self.algorithm()))
self.hideShortHelp()
self.btnRunSingle = QPushButton(QCoreApplication.translate('BatchAlgorithmDialog', "Run as Single Process…"))
self.btnRunSingle.clicked.connect(self.runAsSingle)
self.buttonBox().addButton(self.btnRunSingle, QDialogButtonBox.ResetRole) # reset role to ensure left alignment
self.updateRunButtonVisibility()
def runAsSingle(self):
self.close()
from processing.gui.AlgorithmDialog import AlgorithmDialog
dlg = AlgorithmDialog(self.algorithm().create(), parent=iface.mainWindow())
dlg.show()
dlg.exec_()
def resetAdditionalGui(self):
self.btnRunSingle.setEnabled(True)
def blockAdditionalControlsWhileRunning(self):
self.btnRunSingle.setEnabled(False)
def runAlgorithm(self):
alg_parameters = []
feedback = self.createFeedback()
load_layers = self.mainWidget().checkLoadLayersOnCompletion.isChecked()
project = QgsProject.instance() if load_layers else None
for row in range(self.mainWidget().batchRowCount()):
parameters, ok = self.mainWidget().parametersForRow(row, destinationProject=project, warnOnInvalid=True)
if ok:
alg_parameters.append(parameters)
if not alg_parameters:
return
task = QgsScopedProxyProgressTask(self.tr('Batch Processing - {0}').format(self.algorithm().displayName()))
multi_feedback = BatchFeedback(len(alg_parameters), feedback)
feedback.progressChanged.connect(task.setProgress)
algorithm_results = []
errors = []
with OverrideCursor(Qt.WaitCursor):
self.blockControlsWhileRunning()
self.setExecutedAnyResult(True)
self.cancelButton().setEnabled(True)
# Make sure the Log tab is visible before executing the algorithm
try:
self.showLog()
self.repaint()
except Exception: # FIXME which one?
pass
start_time = time.time()
for count, parameters in enumerate(alg_parameters):
if feedback.isCanceled():
break
self.setProgressText(
QCoreApplication.translate('BatchAlgorithmDialog', '\nProcessing algorithm {0}/{1}…').format(
count + 1, len(alg_parameters)))
self.setInfo(self.tr('<b>Algorithm {0} starting…</b>').format(self.algorithm().displayName()),
escapeHtml=False)
multi_feedback.setCurrentStep(count)
parameters = self.algorithm().preprocessParameters(parameters)
feedback.pushInfo(self.tr('Input parameters:'))
feedback.pushCommandInfo(pformat(parameters))
feedback.pushInfo('')
# important - we create a new context for each iteration
# this avoids holding onto resources and layers from earlier iterations,
# and allows batch processing of many more items than is possible
# if we hold on to these layers
context = dataobjects.createContext(feedback)
alg_start_time = time.time()
multi_feedback.errors = []
results, ok = self.algorithm().run(parameters, context, multi_feedback)
if ok:
self.setInfo(
QCoreApplication.translate('BatchAlgorithmDialog', 'Algorithm {0} correctly executed…').format(
self.algorithm().displayName()), escapeHtml=False)
feedback.pushInfo(
self.tr('Execution completed in {0:0.2f} seconds'.format(time.time() - alg_start_time)))
feedback.pushInfo(self.tr('Results:'))
feedback.pushCommandInfo(pformat(results))
feedback.pushInfo('')
algorithm_results.append({'parameters': parameters, 'results': results})
handleAlgorithmResults(self.algorithm(), context, multi_feedback, False, parameters)
else:
err = [e for e in multi_feedback.errors]
self.setInfo(
QCoreApplication.translate('BatchAlgorithmDialog', 'Algorithm {0} failed…').format(
self.algorithm().displayName()), escapeHtml=False)
feedback.reportError(
self.tr('Execution failed after {0:0.2f} seconds'.format(time.time() - alg_start_time)),
fatalError=False)
errors.append({'parameters': parameters, 'errors': err})
feedback.pushInfo(self.tr('Batch execution completed in {0:0.2f} seconds'.format(time.time() - start_time)))
if errors:
feedback.reportError(self.tr('{} executions failed. See log for further details.').format(len(errors)), fatalError=True)
task = None
self.finish(algorithm_results, errors)
self.cancelButton().setEnabled(False)
def finish(self, algorithm_results, errors):
for count, results in enumerate(algorithm_results):
self.loadHTMLResults(results['results'], count)
self.createSummaryTable(algorithm_results, errors)
self.resetGui()
def loadHTMLResults(self, results, num):
for out in self.algorithm().outputDefinitions():
if isinstance(out, QgsProcessingOutputHtml) and out.name() in results and results[out.name()]:
resultsList.addResult(icon=self.algorithm().icon(), name='{} [{}]'.format(out.description(), num),
result=results[out.name()])
def createSummaryTable(self, algorithm_results, errors):
createTable = False
for out in self.algorithm().outputDefinitions():
if isinstance(out, (QgsProcessingOutputNumber, QgsProcessingOutputString, QgsProcessingOutputBoolean)):
createTable = True
break
if not createTable and not errors:
return
outputFile = getTempFilename('html')
with codecs.open(outputFile, 'w', encoding='utf-8') as f:
if createTable:
for i, res in enumerate(algorithm_results):
results = res['results']
params = res['parameters']
if i > 0:
f.write('<hr>\n')
f.write(self.tr('<h3>Parameters</h3>\n'))
f.write('<table>\n')
for param in self.algorithm().parameterDefinitions():
if not param.isDestination():
if param.name() in params:
f.write('<tr><th>{}</th><td>{}</td></tr>\n'.format(param.description(),
params[param.name()]))
f.write('</table>\n')
f.write(self.tr('<h3>Results</h3>\n'))
f.write('<table>\n')
for out in self.algorithm().outputDefinitions():
if out.name() in results:
f.write('<tr><th>{}</th><td>{}</td></tr>\n'.format(out.description(), results[out.name()]))
f.write('</table>\n')
if errors:
f.write('<h2 style="color: red">{}</h2>\n'.format(self.tr('Errors')))
for i, res in enumerate(errors):
errors = res['errors']
params = res['parameters']
if i > 0:
f.write('<hr>\n')
f.write(self.tr('<h3>Parameters</h3>\n'))
f.write('<table>\n')
for param in self.algorithm().parameterDefinitions():
if not param.isDestination():
if param.name() in params:
f.write(
'<tr><th>{}</th><td>{}</td></tr>\n'.format(param.description(), params[param.name()]))
f.write('</table>\n')
f.write('<h3>{}</h3>\n'.format(self.tr('Error')))
f.write('<p style="color: red">{}</p>\n'.format('<br>'.join(errors)))
resultsList.addResult(icon=self.algorithm().icon(),
name='{} [summary]'.format(self.algorithm().name()), timestamp=time.localtime(),
result=outputFile)
|
Vagab0nd/SiCKRAGE
|
refs/heads/master
|
lib3/oauthlib/oauth1/rfc5849/endpoints/resource.py
|
10
|
# -*- coding: utf-8 -*-
"""
oauthlib.oauth1.rfc5849.endpoints.resource
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of the resource protection provider logic of
OAuth 1.0 RFC 5849.
"""
from __future__ import absolute_import, unicode_literals
import logging
from .. import errors
from .base import BaseEndpoint
log = logging.getLogger(__name__)
class ResourceEndpoint(BaseEndpoint):
"""An endpoint responsible for protecting resources.
Typical use is to instantiate with a request validator and invoke the
``validate_protected_resource_request`` in a decorator around a view
function. If the request is valid, invoke and return the response of the
view. If invalid create and return an error response directly from the
decorator.
See :doc:`/oauth1/validator` for details on which validator methods to implement
for this endpoint.
An example decorator::
from functools import wraps
from your_validator import your_validator
from oauthlib.oauth1 import ResourceEndpoint
endpoint = ResourceEndpoint(your_validator)
def require_oauth(realms=None):
def decorator(f):
@wraps(f)
def wrapper(request, *args, **kwargs):
v, r = endpoint.validate_protected_resource_request(
request.url,
http_method=request.method,
body=request.data,
headers=request.headers,
realms=realms or [])
if v:
return f(*args, **kwargs)
else:
return abort(403)
"""
def validate_protected_resource_request(self, uri, http_method='GET',
body=None, headers=None, realms=None):
"""Create a request token response, with a new request token if valid.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:param realms: A list of realms the resource is protected under.
This will be supplied to the ``validate_realms``
method of the request validator.
:returns: A tuple of 2 elements.
1. True if valid, False otherwise.
2. An oauthlib.common.Request object.
"""
try:
request = self._create_request(uri, http_method, body, headers)
except errors.OAuth1Error:
return False, None
try:
self._check_transport_security(request)
self._check_mandatory_parameters(request)
except errors.OAuth1Error:
return False, request
if not request.resource_owner_key:
return False, request
if not self.request_validator.check_access_token(
request.resource_owner_key):
return False, request
if not self.request_validator.validate_timestamp_and_nonce(
request.client_key, request.timestamp, request.nonce, request,
access_token=request.resource_owner_key):
return False, request
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid client credentials.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy client is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable client enumeration
valid_client = self.request_validator.validate_client_key(
request.client_key, request)
if not valid_client:
request.client_key = self.request_validator.dummy_client
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid or expired token.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy token is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable resource owner enumeration
valid_resource_owner = self.request_validator.validate_access_token(
request.client_key, request.resource_owner_key, request)
if not valid_resource_owner:
request.resource_owner_key = self.request_validator.dummy_access_token
# Note that `realm`_ is only used in authorization headers and how
# it should be interpreted is not included in the OAuth spec.
# However they could be seen as a scope or realm to which the
# client has access and as such every client should be checked
# to ensure it is authorized access to that scope or realm.
# .. _`realm`: https://tools.ietf.org/html/rfc2617#section-1.2
#
# Note that early exit would enable client realm access enumeration.
#
# The require_realm indicates this is the first step in the OAuth
# workflow where a client requests access to a specific realm.
# This first step (obtaining request token) need not require a realm
# and can then be identified by checking the require_resource_owner
# flag and absence of realm.
#
# Clients obtaining an access token will not supply a realm and it will
# not be checked. Instead the previously requested realm should be
# transferred from the request token to the access token.
#
# Access to protected resources will always validate the realm but note
# that the realm is now tied to the access token and not provided by
# the client.
valid_realm = self.request_validator.validate_realms(request.client_key,
request.resource_owner_key, request, uri=request.uri,
realms=realms)
valid_signature = self._check_signature(request)
# log the results to the validator_log
# this lets us handle internal reporting and analysis
request.validator_log['client'] = valid_client
request.validator_log['resource_owner'] = valid_resource_owner
request.validator_log['realm'] = valid_realm
request.validator_log['signature'] = valid_signature
# We delay checking validity until the very end, using dummy values for
# calculations and fetching secrets/keys to ensure the flow of every
# request remains almost identical regardless of whether valid values
# have been supplied. This ensures near constant time execution and
# prevents malicious users from guessing sensitive information
v = all((valid_client, valid_resource_owner, valid_realm,
valid_signature))
if not v:
log.info("[Failure] request verification failed.")
log.info("Valid client: %s", valid_client)
log.info("Valid token: %s", valid_resource_owner)
log.info("Valid realm: %s", valid_realm)
log.info("Valid signature: %s", valid_signature)
return v, request
|
asser/django
|
refs/heads/master
|
tests/foreign_object/models/person.py
|
64
|
import datetime
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Country(models.Model):
# Table Column Fields
name = models.CharField(max_length=50)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Person(models.Model):
# Table Column Fields
name = models.CharField(max_length=128)
person_country_id = models.IntegerField()
# Relation Fields
person_country = models.ForeignObject(
Country,
from_fields=['person_country_id'],
to_fields=['id'],
on_delete=models.CASCADE,
)
friends = models.ManyToManyField('self', through='Friendship', symmetrical=False)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Group(models.Model):
# Table Column Fields
name = models.CharField(max_length=128)
group_country = models.ForeignKey(Country, models.CASCADE)
members = models.ManyToManyField(Person, related_name='groups', through='Membership')
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Membership(models.Model):
# Table Column Fields
membership_country = models.ForeignKey(Country, models.CASCADE)
date_joined = models.DateTimeField(default=datetime.datetime.now)
invite_reason = models.CharField(max_length=64, null=True)
person_id = models.IntegerField()
group_id = models.IntegerField(blank=True, null=True)
# Relation Fields
person = models.ForeignObject(
Person,
from_fields=['person_id', 'membership_country'],
to_fields=['id', 'person_country_id'],
on_delete=models.CASCADE,
)
group = models.ForeignObject(
Group,
from_fields=['group_id', 'membership_country'],
to_fields=['id', 'group_country'],
on_delete=models.CASCADE,
)
class Meta:
ordering = ('date_joined', 'invite_reason')
def __str__(self):
group_name = self.group.name if self.group_id else 'NULL'
return "%s is a member of %s" % (self.person.name, group_name)
class Friendship(models.Model):
# Table Column Fields
from_friend_country = models.ForeignKey(Country, models.CASCADE, related_name="from_friend_country")
from_friend_id = models.IntegerField()
to_friend_country_id = models.IntegerField()
to_friend_id = models.IntegerField()
# Relation Fields
from_friend = models.ForeignObject(
Person,
on_delete=models.CASCADE,
from_fields=['from_friend_country', 'from_friend_id'],
to_fields=['person_country_id', 'id'],
related_name='from_friend')
to_friend_country = models.ForeignObject(
Country,
from_fields=['to_friend_country_id'],
to_fields=['id'],
related_name='to_friend_country',
on_delete=models.CASCADE,
)
to_friend = models.ForeignObject(
Person,
from_fields=['to_friend_country_id', 'to_friend_id'],
to_fields=['person_country_id', 'id'],
related_name='to_friend',
on_delete=models.CASCADE,
)
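# --- Illustrative usage (not part of the original test models) ---
# A minimal sketch of how the composite ForeignObject fields above resolve;
# the object names and values are placeholders and assume a configured
# Django test database.
#
#   usa = Country.objects.create(name='USA')
#   bob = Person.objects.create(name='Bob', person_country_id=usa.id)
#   assert bob.person_country == usa   # resolved via from_fields/to_fields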
|
the-zebulan/CodeWars
|
refs/heads/master
|
katas/kyu_4/human_readable_duration_format.py
|
1
|
IN_SECONDS = (('year', 31536000), ('day', 86400), ('hour', 3600),
('minute', 60), ('second', 1))
def format_duration(seconds):
if not seconds:
return 'now'
times = []
words = 0
for name, num in IN_SECONDS:
q, seconds = divmod(seconds, num)
if q:
times.append('{} {}{}'.format(q, name, 's' if q > 1 else ''))
words += 1
return times[0] if words == 1 else \
'{} and {}'.format(', '.join(times[:-1]), times[-1])
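# --- Illustrative usage (not part of the original kata solution) ---
# A few hand-checked calls showing the expected formatting:
if __name__ == '__main__':
    assert format_duration(0) == 'now'
    assert format_duration(62) == '1 minute and 2 seconds'
    assert format_duration(3662) == '1 hour, 1 minute and 2 seconds'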
|
mkennedy04/knodj
|
refs/heads/master
|
env/Lib/site-packages/pip/_vendor/requests/packages/chardet/eucjpprober.py
|
2918
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJPSMModel
class EUCJPProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCJPSMModel)
self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
self._mContextAnalyzer = EUCJPContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return "EUC-JP"
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
# PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar, charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
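# --- Illustrative usage (not part of the original module) ---
# A minimal sketch; probers are normally driven by chardet's
# UniversalDetector, but the class above can also be fed bytes directly.
# `sample` is a placeholder for EUC-JP encoded data.
#
#   prober = EUCJPProber()
#   prober.feed(sample)
#   print(prober.get_charset_name(), prober.get_confidence())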
|
stvstnfrd/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/credentials/settings/__init__.py
|
12133432
| |
StackStorm/st2
|
refs/heads/master
|
st2api/st2api/controllers/exp/__init__.py
|
12133432
| |
UNINETT/nav
|
refs/heads/master
|
python/nav/metrics/__init__.py
|
2
|
# Copyright (C) 2013 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details. You should have received a copy of the GNU General Public
# License along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""
This package encompasses all NAV APIs to send and retrieve metrics and
graphs from Graphite.
"""
from nav.config import NAVConfigParser
class GraphiteConfigParser(NAVConfigParser):
"""Parser for NAV's graphite related configuration"""
DEFAULT_CONFIG_FILES = ['graphite.conf']
DEFAULT_CONFIG = u"""
[carbon]
host = 127.0.0.1
port = 2003
[graphiteweb]
base=http://localhost:8000/
format=png
"""
CONFIG = GraphiteConfigParser()
CONFIG.read_all()
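# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of reading back the [carbon] options parsed above,
# assuming NAVConfigParser exposes the standard ConfigParser API.
if __name__ == '__main__':
    print(CONFIG.get('carbon', 'host'))
    print(CONFIG.getint('carbon', 'port'))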
|
ianyh/heroku-buildpack-python-opencv
|
refs/heads/master
|
vendor/.heroku/lib/python2.7/encodings/zlib_codec.py
|
533
|
""" Python 'zlib_codec' Codec - zlib compression encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg (mal@lemburg.com).
"""
import codecs
import zlib # this codec needs the optional zlib module !
### Codec APIs
def zlib_encode(input,errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = zlib.compress(input)
return (output, len(input))
def zlib_decode(input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = zlib.decompress(input)
return (output, len(input))
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return zlib_encode(input, errors)
def decode(self, input, errors='strict'):
return zlib_decode(input, errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors='strict'):
assert errors == 'strict'
self.errors = errors
self.compressobj = zlib.compressobj()
def encode(self, input, final=False):
if final:
c = self.compressobj.compress(input)
return c + self.compressobj.flush()
else:
return self.compressobj.compress(input)
def reset(self):
self.compressobj = zlib.compressobj()
class IncrementalDecoder(codecs.IncrementalDecoder):
def __init__(self, errors='strict'):
assert errors == 'strict'
self.errors = errors
self.decompressobj = zlib.decompressobj()
def decode(self, input, final=False):
if final:
c = self.decompressobj.decompress(input)
return c + self.decompressobj.flush()
else:
return self.decompressobj.decompress(input)
def reset(self):
self.decompressobj = zlib.decompressobj()
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='zlib',
encode=zlib_encode,
decode=zlib_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
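# --- Illustrative usage (not part of the original module) ---
# A minimal round-trip sketch, assuming the Python 2 string semantics this
# codec targets:
if __name__ == '__main__':
    data = 'hello world' * 10
    compressed, consumed = zlib_encode(data)
    assert consumed == len(data)
    restored, _ = zlib_decode(compressed)
    assert restored == data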
|
whaleygeek/mb_sdcard
|
refs/heads/master
|
src/microbit/auto_serial/serial/serialposix.py
|
141
|
#!/usr/bin/env python
#
# Python Serial Port Extension for Win32, Linux, BSD, Jython
# module for serial IO for POSIX compatible systems, like Linux
# see __init__.py
#
# (C) 2001-2010 Chris Liechti <cliechti@gmx.net>
# this is distributed under a free software license, see license.txt
#
# parts based on code from Grant B. Edwards <grante@visi.com>:
# ftp://ftp.visi.com/users/grante/python/PosixSerial.py
#
# references: http://www.easysw.com/~mike/serial/serial.html
import sys, os, fcntl, termios, struct, select, errno, time
from serial.serialutil import *
# Do check the Python version as some constants have moved.
if (sys.hexversion < 0x020100f0):
import TERMIOS
else:
TERMIOS = termios
if (sys.hexversion < 0x020200f0):
import FCNTL
else:
FCNTL = fcntl
# try to detect the OS so that a device can be selected...
# this code block should supply a device() and set_special_baudrate() function
# for the platform
plat = sys.platform.lower()
if plat[:5] == 'linux': # Linux (confirmed)
def device(port):
return '/dev/ttyS%d' % port
TCGETS2 = 0x802C542A
TCSETS2 = 0x402C542B
BOTHER = 0o010000
def set_special_baudrate(port, baudrate):
# right size is 44 on x86_64, allow for some growth
import array
buf = array.array('i', [0] * 64)
try:
# get serial_struct
FCNTL.ioctl(port.fd, TCGETS2, buf)
# set custom speed
buf[2] &= ~TERMIOS.CBAUD
buf[2] |= BOTHER
buf[9] = buf[10] = baudrate
# set serial_struct
res = FCNTL.ioctl(port.fd, TCSETS2, buf)
except IOError, e:
raise ValueError('Failed to set custom baud rate (%s): %s' % (baudrate, e))
baudrate_constants = {
0: 0000000, # hang up
50: 0000001,
75: 0000002,
110: 0000003,
134: 0000004,
150: 0000005,
200: 0000006,
300: 0000007,
600: 0000010,
1200: 0000011,
1800: 0000012,
2400: 0000013,
4800: 0000014,
9600: 0000015,
19200: 0000016,
38400: 0000017,
57600: 0010001,
115200: 0010002,
230400: 0010003,
460800: 0010004,
500000: 0010005,
576000: 0010006,
921600: 0010007,
1000000: 0010010,
1152000: 0010011,
1500000: 0010012,
2000000: 0010013,
2500000: 0010014,
3000000: 0010015,
3500000: 0010016,
4000000: 0010017
}
elif plat == 'cygwin': # cygwin/win32 (confirmed)
def device(port):
return '/dev/com%d' % (port + 1)
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {
128000: 0x01003,
256000: 0x01005,
500000: 0x01007,
576000: 0x01008,
921600: 0x01009,
1000000: 0x0100a,
1152000: 0x0100b,
1500000: 0x0100c,
2000000: 0x0100d,
2500000: 0x0100e,
3000000: 0x0100f
}
elif plat[:7] == 'openbsd': # OpenBSD
def device(port):
return '/dev/cua%02d' % port
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat[:3] == 'bsd' or \
plat[:7] == 'freebsd':
def device(port):
return '/dev/cuad%d' % port
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat[:6] == 'darwin': # OS X
version = os.uname()[2].split('.')
# Tiger or above can support arbitrary serial speeds
if int(version[0]) >= 8:
def set_special_baudrate(port, baudrate):
# use IOKit-specific call to set up high speeds
import array, fcntl
buf = array.array('i', [baudrate])
IOSSIOSPEED = 0x80045402 #_IOW('T', 2, speed_t)
fcntl.ioctl(port.fd, IOSSIOSPEED, buf, 1)
else: # version < 8
def set_special_baudrate(port, baudrate):
raise ValueError("baud rate not supported")
def device(port):
return '/dev/cuad%d' % port
baudrate_constants = {}
elif plat[:6] == 'netbsd': # NetBSD 1.6 testing by Erk
def device(port):
return '/dev/dty%02d' % port
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat[:4] == 'irix': # IRIX (partially tested)
def device(port):
return '/dev/ttyf%d' % (port+1) #XXX different device names depending on flow control
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat[:2] == 'hp': # HP-UX (not tested)
def device(port):
return '/dev/tty%dp0' % (port+1)
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat[:5] == 'sunos': # Solaris/SunOS (confirmed)
def device(port):
return '/dev/tty%c' % (ord('a')+port)
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat[:3] == 'aix': # AIX
def device(port):
return '/dev/tty%d' % (port)
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
else:
# platform detection has failed...
sys.stderr.write("""\
don't know how to number ttys on this system.
! Use an explicit path (eg /dev/ttyS1) or send this information to
! the author of this module:
sys.platform = %r
os.name = %r
serialposix.py version = %s
also add the device name of the serial port and where the
counting starts for the first serial port.
e.g. 'first serial port: /dev/ttyS0'
and with a bit luck you can get this module running...
""" % (sys.platform, os.name, VERSION))
# no exception, just continue with a brave attempt to build a device name
# even if the device name is not correct for the platform it has chances
# to work using a string with the real device name as port parameter.
    def device(portnum):
return '/dev/ttyS%d' % portnum
def set_special_baudrate(port, baudrate):
raise SerialException("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
#~ raise Exception, "this module does not run on this platform, sorry."
# whats up with "aix", "beos", ....
# they should work, just need to know the device names.
# load some constants for later use.
# try to use values from TERMIOS, use defaults from linux otherwise
TIOCMGET = hasattr(TERMIOS, 'TIOCMGET') and TERMIOS.TIOCMGET or 0x5415
TIOCMBIS = hasattr(TERMIOS, 'TIOCMBIS') and TERMIOS.TIOCMBIS or 0x5416
TIOCMBIC = hasattr(TERMIOS, 'TIOCMBIC') and TERMIOS.TIOCMBIC or 0x5417
TIOCMSET = hasattr(TERMIOS, 'TIOCMSET') and TERMIOS.TIOCMSET or 0x5418
#TIOCM_LE = hasattr(TERMIOS, 'TIOCM_LE') and TERMIOS.TIOCM_LE or 0x001
TIOCM_DTR = hasattr(TERMIOS, 'TIOCM_DTR') and TERMIOS.TIOCM_DTR or 0x002
TIOCM_RTS = hasattr(TERMIOS, 'TIOCM_RTS') and TERMIOS.TIOCM_RTS or 0x004
#TIOCM_ST = hasattr(TERMIOS, 'TIOCM_ST') and TERMIOS.TIOCM_ST or 0x008
#TIOCM_SR = hasattr(TERMIOS, 'TIOCM_SR') and TERMIOS.TIOCM_SR or 0x010
TIOCM_CTS = hasattr(TERMIOS, 'TIOCM_CTS') and TERMIOS.TIOCM_CTS or 0x020
TIOCM_CAR = hasattr(TERMIOS, 'TIOCM_CAR') and TERMIOS.TIOCM_CAR or 0x040
TIOCM_RNG = hasattr(TERMIOS, 'TIOCM_RNG') and TERMIOS.TIOCM_RNG or 0x080
TIOCM_DSR = hasattr(TERMIOS, 'TIOCM_DSR') and TERMIOS.TIOCM_DSR or 0x100
TIOCM_CD = hasattr(TERMIOS, 'TIOCM_CD') and TERMIOS.TIOCM_CD or TIOCM_CAR
TIOCM_RI = hasattr(TERMIOS, 'TIOCM_RI') and TERMIOS.TIOCM_RI or TIOCM_RNG
#TIOCM_OUT1 = hasattr(TERMIOS, 'TIOCM_OUT1') and TERMIOS.TIOCM_OUT1 or 0x2000
#TIOCM_OUT2 = hasattr(TERMIOS, 'TIOCM_OUT2') and TERMIOS.TIOCM_OUT2 or 0x4000
if hasattr(TERMIOS, 'TIOCINQ'):
TIOCINQ = TERMIOS.TIOCINQ
else:
TIOCINQ = hasattr(TERMIOS, 'FIONREAD') and TERMIOS.FIONREAD or 0x541B
TIOCOUTQ = hasattr(TERMIOS, 'TIOCOUTQ') and TERMIOS.TIOCOUTQ or 0x5411
TIOCM_zero_str = struct.pack('I', 0)
TIOCM_RTS_str = struct.pack('I', TIOCM_RTS)
TIOCM_DTR_str = struct.pack('I', TIOCM_DTR)
TIOCSBRK = hasattr(TERMIOS, 'TIOCSBRK') and TERMIOS.TIOCSBRK or 0x5427
TIOCCBRK = hasattr(TERMIOS, 'TIOCCBRK') and TERMIOS.TIOCCBRK or 0x5428
class PosixSerial(SerialBase):
"""Serial port class POSIX implementation. Serial port configuration is
done with termios and fcntl. Runs on Linux and many other Un*x like
systems."""
def open(self):
"""Open port with current settings. This may throw a SerialException
if the port cannot be opened."""
if self._port is None:
raise SerialException("Port must be configured before it can be used.")
if self._isOpen:
raise SerialException("Port is already open.")
self.fd = None
# open
try:
self.fd = os.open(self.portstr, os.O_RDWR|os.O_NOCTTY|os.O_NONBLOCK)
except IOError, msg:
self.fd = None
raise SerialException(msg.errno, "could not open port %s: %s" % (self._port, msg))
#~ fcntl.fcntl(self.fd, FCNTL.F_SETFL, 0) # set blocking
try:
self._reconfigurePort()
except:
try:
os.close(self.fd)
except:
# ignore any exception when closing the port
# also to keep original exception that happened when setting up
pass
self.fd = None
raise
else:
self._isOpen = True
self.flushInput()
def _reconfigurePort(self):
"""Set communication parameters on opened port."""
if self.fd is None:
raise SerialException("Can only operate on a valid file descriptor")
custom_baud = None
vmin = vtime = 0 # timeout is done via select
if self._interCharTimeout is not None:
vmin = 1
vtime = int(self._interCharTimeout * 10)
try:
orig_attr = termios.tcgetattr(self.fd)
iflag, oflag, cflag, lflag, ispeed, ospeed, cc = orig_attr
except termios.error, msg: # if a port is nonexistent but has a /dev file, it'll fail here
raise SerialException("Could not configure port: %s" % msg)
# set up raw mode / no echo / binary
cflag |= (TERMIOS.CLOCAL|TERMIOS.CREAD)
lflag &= ~(TERMIOS.ICANON|TERMIOS.ECHO|TERMIOS.ECHOE|TERMIOS.ECHOK|TERMIOS.ECHONL|
TERMIOS.ISIG|TERMIOS.IEXTEN) #|TERMIOS.ECHOPRT
for flag in ('ECHOCTL', 'ECHOKE'): # netbsd workaround for Erk
if hasattr(TERMIOS, flag):
lflag &= ~getattr(TERMIOS, flag)
oflag &= ~(TERMIOS.OPOST)
iflag &= ~(TERMIOS.INLCR|TERMIOS.IGNCR|TERMIOS.ICRNL|TERMIOS.IGNBRK)
if hasattr(TERMIOS, 'IUCLC'):
iflag &= ~TERMIOS.IUCLC
if hasattr(TERMIOS, 'PARMRK'):
iflag &= ~TERMIOS.PARMRK
# setup baud rate
try:
ispeed = ospeed = getattr(TERMIOS, 'B%s' % (self._baudrate))
except AttributeError:
try:
ispeed = ospeed = baudrate_constants[self._baudrate]
except KeyError:
#~ raise ValueError('Invalid baud rate: %r' % self._baudrate)
# may need custom baud rate, it isn't in our list.
ispeed = ospeed = getattr(TERMIOS, 'B38400')
try:
custom_baud = int(self._baudrate) # store for later
except ValueError:
raise ValueError('Invalid baud rate: %r' % self._baudrate)
else:
if custom_baud < 0:
raise ValueError('Invalid baud rate: %r' % self._baudrate)
# setup char len
cflag &= ~TERMIOS.CSIZE
if self._bytesize == 8:
cflag |= TERMIOS.CS8
elif self._bytesize == 7:
cflag |= TERMIOS.CS7
elif self._bytesize == 6:
cflag |= TERMIOS.CS6
elif self._bytesize == 5:
cflag |= TERMIOS.CS5
else:
raise ValueError('Invalid char len: %r' % self._bytesize)
# setup stopbits
if self._stopbits == STOPBITS_ONE:
cflag &= ~(TERMIOS.CSTOPB)
elif self._stopbits == STOPBITS_ONE_POINT_FIVE:
cflag |= (TERMIOS.CSTOPB) # XXX same as TWO.. there is no POSIX support for 1.5
elif self._stopbits == STOPBITS_TWO:
cflag |= (TERMIOS.CSTOPB)
else:
raise ValueError('Invalid stop bit specification: %r' % self._stopbits)
# setup parity
iflag &= ~(TERMIOS.INPCK|TERMIOS.ISTRIP)
if self._parity == PARITY_NONE:
cflag &= ~(TERMIOS.PARENB|TERMIOS.PARODD)
elif self._parity == PARITY_EVEN:
cflag &= ~(TERMIOS.PARODD)
cflag |= (TERMIOS.PARENB)
elif self._parity == PARITY_ODD:
cflag |= (TERMIOS.PARENB|TERMIOS.PARODD)
else:
raise ValueError('Invalid parity: %r' % self._parity)
# setup flow control
# xonxoff
if hasattr(TERMIOS, 'IXANY'):
if self._xonxoff:
iflag |= (TERMIOS.IXON|TERMIOS.IXOFF) #|TERMIOS.IXANY)
else:
iflag &= ~(TERMIOS.IXON|TERMIOS.IXOFF|TERMIOS.IXANY)
else:
if self._xonxoff:
iflag |= (TERMIOS.IXON|TERMIOS.IXOFF)
else:
iflag &= ~(TERMIOS.IXON|TERMIOS.IXOFF)
# rtscts
if hasattr(TERMIOS, 'CRTSCTS'):
if self._rtscts:
cflag |= (TERMIOS.CRTSCTS)
else:
cflag &= ~(TERMIOS.CRTSCTS)
elif hasattr(TERMIOS, 'CNEW_RTSCTS'): # try it with alternate constant name
if self._rtscts:
cflag |= (TERMIOS.CNEW_RTSCTS)
else:
cflag &= ~(TERMIOS.CNEW_RTSCTS)
# XXX should there be a warning if setting up rtscts (and xonxoff etc) fails??
# buffer
        # vmin "minimal number of characters to be read. 0 for non blocking"
if vmin < 0 or vmin > 255:
raise ValueError('Invalid vmin: %r ' % vmin)
cc[TERMIOS.VMIN] = vmin
# vtime
if vtime < 0 or vtime > 255:
raise ValueError('Invalid vtime: %r' % vtime)
cc[TERMIOS.VTIME] = vtime
# activate settings
if [iflag, oflag, cflag, lflag, ispeed, ospeed, cc] != orig_attr:
termios.tcsetattr(self.fd, TERMIOS.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
# apply custom baud rate, if any
if custom_baud is not None:
set_special_baudrate(self, custom_baud)
def close(self):
"""Close port"""
if self._isOpen:
if self.fd is not None:
os.close(self.fd)
self.fd = None
self._isOpen = False
def makeDeviceName(self, port):
return device(port)
# - - - - - - - - - - - - - - - - - - - - - - - -
def inWaiting(self):
"""Return the number of characters currently in the input buffer."""
#~ s = fcntl.ioctl(self.fd, TERMIOS.FIONREAD, TIOCM_zero_str)
s = fcntl.ioctl(self.fd, TIOCINQ, TIOCM_zero_str)
return struct.unpack('I',s)[0]
# select based implementation, proved to work on many systems
def read(self, size=1):
"""Read size bytes from the serial port. If a timeout is set it may
return less characters as requested. With no timeout it will block
until the requested number of bytes is read."""
if not self._isOpen: raise portNotOpenError
read = bytearray()
while len(read) < size:
try:
ready,_,_ = select.select([self.fd],[],[], self._timeout)
# If select was used with a timeout, and the timeout occurs, it
# returns with empty lists -> thus abort read operation.
# For timeout == 0 (non-blocking operation) also abort when there
# is nothing to read.
if not ready:
break # timeout
buf = os.read(self.fd, size-len(read))
# read should always return some data as select reported it was
# ready to read when we get to this point.
if not buf:
# Disconnected devices, at least on Linux, show the
# behavior that they are always ready to read immediately
# but reading returns nothing.
raise SerialException('device reports readiness to read but returned no data (device disconnected or multiple access on port?)')
read.extend(buf)
except select.error, e:
# ignore EAGAIN errors. all other errors are shown
# see also http://www.python.org/dev/peps/pep-3151/#select
if e[0] != errno.EAGAIN:
raise SerialException('read failed: %s' % (e,))
except OSError, e:
# ignore EAGAIN errors. all other errors are shown
if e.errno != errno.EAGAIN:
raise SerialException('read failed: %s' % (e,))
return bytes(read)
def write(self, data):
"""Output the given string over the serial port."""
if not self._isOpen: raise portNotOpenError
d = to_bytes(data)
tx_len = len(d)
if self._writeTimeout is not None and self._writeTimeout > 0:
timeout = time.time() + self._writeTimeout
else:
timeout = None
while tx_len > 0:
try:
n = os.write(self.fd, d)
if timeout:
# when timeout is set, use select to wait for being ready
# with the time left as timeout
timeleft = timeout - time.time()
if timeleft < 0:
raise writeTimeoutError
_, ready, _ = select.select([], [self.fd], [], timeleft)
if not ready:
raise writeTimeoutError
else:
# wait for write operation
_, ready, _ = select.select([], [self.fd], [], None)
if not ready:
raise SerialException('write failed (select)')
d = d[n:]
tx_len -= n
except OSError, v:
if v.errno != errno.EAGAIN:
raise SerialException('write failed: %s' % (v,))
return len(data)
def flush(self):
"""Flush of file like objects. In this case, wait until all data
is written."""
self.drainOutput()
def flushInput(self):
"""Clear input buffer, discarding all that is in the buffer."""
if not self._isOpen: raise portNotOpenError
termios.tcflush(self.fd, TERMIOS.TCIFLUSH)
def flushOutput(self):
"""Clear output buffer, aborting the current output and
discarding all that is in the buffer."""
if not self._isOpen: raise portNotOpenError
termios.tcflush(self.fd, TERMIOS.TCOFLUSH)
def sendBreak(self, duration=0.25):
"""Send break condition. Timed, returns to idle state after given duration."""
if not self._isOpen: raise portNotOpenError
termios.tcsendbreak(self.fd, int(duration/0.25))
def setBreak(self, level=1):
"""Set break: Controls TXD. When active, no transmitting is possible."""
if self.fd is None: raise portNotOpenError
if level:
fcntl.ioctl(self.fd, TIOCSBRK)
else:
fcntl.ioctl(self.fd, TIOCCBRK)
def setRTS(self, level=1):
"""Set terminal status line: Request To Send"""
if not self._isOpen: raise portNotOpenError
if level:
fcntl.ioctl(self.fd, TIOCMBIS, TIOCM_RTS_str)
else:
fcntl.ioctl(self.fd, TIOCMBIC, TIOCM_RTS_str)
def setDTR(self, level=1):
"""Set terminal status line: Data Terminal Ready"""
if not self._isOpen: raise portNotOpenError
if level:
fcntl.ioctl(self.fd, TIOCMBIS, TIOCM_DTR_str)
else:
fcntl.ioctl(self.fd, TIOCMBIC, TIOCM_DTR_str)
def getCTS(self):
"""Read terminal status line: Clear To Send"""
if not self._isOpen: raise portNotOpenError
s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
return struct.unpack('I',s)[0] & TIOCM_CTS != 0
def getDSR(self):
"""Read terminal status line: Data Set Ready"""
if not self._isOpen: raise portNotOpenError
s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
return struct.unpack('I',s)[0] & TIOCM_DSR != 0
def getRI(self):
"""Read terminal status line: Ring Indicator"""
if not self._isOpen: raise portNotOpenError
s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
return struct.unpack('I',s)[0] & TIOCM_RI != 0
def getCD(self):
"""Read terminal status line: Carrier Detect"""
if not self._isOpen: raise portNotOpenError
s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
return struct.unpack('I',s)[0] & TIOCM_CD != 0
# - - platform specific - - - -
def outWaiting(self):
"""Return the number of characters currently in the output buffer."""
#~ s = fcntl.ioctl(self.fd, TERMIOS.FIONREAD, TIOCM_zero_str)
s = fcntl.ioctl(self.fd, TIOCOUTQ, TIOCM_zero_str)
return struct.unpack('I',s)[0]
def drainOutput(self):
"""internal - not portable!"""
if not self._isOpen: raise portNotOpenError
termios.tcdrain(self.fd)
def nonblocking(self):
"""internal - not portable!"""
if not self._isOpen: raise portNotOpenError
fcntl.fcntl(self.fd, FCNTL.F_SETFL, os.O_NONBLOCK)
def fileno(self):
"""\
For easier use of the serial port instance with select.
WARNING: this function is not portable to different platforms!
"""
if not self._isOpen: raise portNotOpenError
return self.fd
def setXON(self, level=True):
"""\
Manually control flow - when software flow control is enabled.
This will send XON (true) and XOFF (false) to the other device.
WARNING: this function is not portable to different platforms!
"""
        if not self._isOpen: raise portNotOpenError
        if level:
termios.tcflow(self.fd, TERMIOS.TCION)
else:
termios.tcflow(self.fd, TERMIOS.TCIOFF)
def flowControlOut(self, enable):
"""\
Manually control flow of outgoing data - when hardware or software flow
control is enabled.
WARNING: this function is not portable to different platforms!
"""
if not self._isOpen: raise portNotOpenError
if enable:
termios.tcflow(self.fd, TERMIOS.TCOON)
else:
termios.tcflow(self.fd, TERMIOS.TCOOFF)
# assemble Serial class with the platform specific implementation and the base
# for file-like behavior. for Python 2.6 and newer, which provide the new I/O
# library, derive from io.RawIOBase
try:
import io
except ImportError:
# classic version with our own file-like emulation
class Serial(PosixSerial, FileLike):
pass
else:
# io library present
class Serial(PosixSerial, io.RawIOBase):
pass
class PosixPollSerial(Serial):
"""poll based read implementation. not all systems support poll properly.
however this one has better handling of errors, such as a device
disconnecting while it's in use (e.g. USB-serial unplugged)"""
def read(self, size=1):
"""Read size bytes from the serial port. If a timeout is set it may
return less characters as requested. With no timeout it will block
until the requested number of bytes is read."""
if self.fd is None: raise portNotOpenError
read = bytearray()
poll = select.poll()
poll.register(self.fd, select.POLLIN|select.POLLERR|select.POLLHUP|select.POLLNVAL)
if size > 0:
while len(read) < size:
# print "\tread(): size",size, "have", len(read) #debug
# wait until device becomes ready to read (or something fails)
for fd, event in poll.poll(self._timeout*1000):
if event & (select.POLLERR|select.POLLHUP|select.POLLNVAL):
raise SerialException('device reports error (poll)')
# we don't care if it is select.POLLIN or timeout, that's
# handled below
buf = os.read(self.fd, size - len(read))
read.extend(buf)
if ((self._timeout is not None and self._timeout >= 0) or
(self._interCharTimeout is not None and self._interCharTimeout > 0)) and not buf:
break # early abort on timeout
return bytes(read)
if __name__ == '__main__':
s = Serial(0,
baudrate=19200, # baud rate
bytesize=EIGHTBITS, # number of data bits
parity=PARITY_EVEN, # enable parity checking
stopbits=STOPBITS_ONE, # number of stop bits
timeout=3, # set a timeout value, None for waiting forever
xonxoff=0, # enable software flow control
rtscts=0, # enable RTS/CTS flow control
)
s.setRTS(1)
s.setDTR(1)
s.flushInput()
s.flushOutput()
s.write('hello')
sys.stdout.write('%r\n' % s.read(5))
sys.stdout.write('%s\n' % s.inWaiting())
del s
|
anandsimmy/ecommerce
|
refs/heads/master
|
src/oscar/apps/address/migrations/0003_auto_20150927_1551.py
|
10
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('address', '0002_auto_20150927_1547'),
]
operations = [
migrations.AlterModelOptions(
name='useraddress',
options={'ordering': ['-num_orders_as_shipping_address'], 'verbose_name': 'User address',
'verbose_name_plural': 'User addresses'},
),
migrations.AlterField(
model_name='useraddress',
name='num_orders_as_billing_address',
field=models.PositiveIntegerField(default=0, verbose_name='Number of Orders as Shipping Address'),
),
migrations.AlterField(
model_name='useraddress',
name='num_orders_as_shipping_address',
field=models.PositiveIntegerField(default=0, verbose_name='Number of Orders as Billing Address'),
),
]
|
Shihta/python-novaclient
|
refs/heads/shihta0
|
novaclient/v1_1/usage.py
|
5
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Usage interface.
"""
from novaclient import base
class Usage(base.Resource):
"""
Usage contains information about a tenant's physical resource usage
"""
def __repr__(self):
return "<ComputeUsage>"
class UsageManager(base.ManagerWithFind):
"""
Manage :class:`Usage` resources.
"""
resource_class = Usage
def list(self, start, end, detailed=False):
"""
Get usage for all tenants
:param start: :class:`datetime.datetime` Start date
:param end: :class:`datetime.datetime` End date
:param detailed: Whether to include information about each
instance whose usage is part of the report
:rtype: list of :class:`Usage`.
"""
return self._list(
"/os-simple-tenant-usage?start=%s&end=%s&detailed=%s" %
(start.isoformat(), end.isoformat(), int(bool(detailed))),
"tenant_usages")
def get(self, tenant_id, start, end):
"""
Get usage for a specific tenant.
:param tenant_id: Tenant ID to fetch usage for
:param start: :class:`datetime.datetime` Start date
:param end: :class:`datetime.datetime` End date
:rtype: :class:`Usage`
"""
return self._get("/os-simple-tenant-usage/%s?start=%s&end=%s" %
(tenant_id, start.isoformat(), end.isoformat()),
"tenant_usage")
|
knxd/PyKNyX
|
refs/heads/master
|
pyknyx/proto/connection.py
|
2
|
"""
"""
import threading
class BaseConnection(threading.Thread):
"""
"""
def __init__(self, *args, **kwargs):
"""
"""
super(BaseConnection, self).__init__(*args, **kwargs)
self._run = False
    def stop(self):
"""
"""
self._run = True
class KnxConnection(BaseConnection):
"""
"""
def __init__(self, telegramListener, url="localhost"):
"""
"""
super(KnxConnection, self).__init__()
self._url = url
self._telegramListener = telegramListener
self._eibConnection = None
self._stop = self._run = False
#def run(self):
#"""
#"""
#while not self._stop:
#self._eibConnection = EIBConnection.EIBSocketUrl(self._url)
#if self._eibConnection == 0:
#{
#EIBSetEvent (con_m, stop_m);
#if (EIBOpen_GroupSocket (con_m, 0) != -1)
#{
#logger_m.infoStream() << "KnxConnection: Group socket opened. Waiting for messages." << endlog;
#int retval;
#while ((retval = checkInput()) > 0)
#{
#/* TODO: find another way to check if event occured
#struct timeval tv;
#tv.tv_sec = 1;
#tv.tv_usec = 0;
#pth_select_ev(0,0,0,0,&tv,stop);
#*/
#}
#if (retval == -1)
#retry = false;
#}
#else
#logger_m.errorStream() << "Failed to open group socket." << endlog;
#if (con_m)
#EIBClose(con_m);
#con_m = 0;
#}
#else
#logger_m.errorStream() << "Failed to open knxConnection url." << endlog;
#if (retry)
#{
#struct timeval tv;
#tv.tv_sec = 60;
#tv.tv_usec = 0;
#pth_select_ev(0,0,0,0,&tv,stop_m);
#if (pth_event_status (stop_m) == PTH_STATUS_OCCURRED)
#retry = false;
#}
#}
#logger_m.infoStream() << "Out of KnxConnection loop." << endlog;
##pth_event_free (stop_m, PTH_FREE_THIS); # use Event
#self._run = False # use Event
#def write(gad, buffer_):
#"""
#"""
#if(gad == 0)
#return;
#logger_m.infoStream() << "write(gad=" << Object::WriteGroupAddr(gad) << ", buf, len=" << len << ")" << endlog;
#if (con_m)
#{
#len = EIBSendGroup (con_m, gad, len, buf);
#if (len == -1)
#{
#logger_m.errorStream() << "Write request failed (gad=" << Object::WriteGroupAddr(gad) << ", buf, len=" << len << ")" << endlog;
#}
#else
#{
#logger_m.debugStream() << "Write request sent" << endlog;
#}
#}
#def checkInput(self):
#"""
#"""
#int len;
#eibaddr_t dest;
#eibaddr_t src;
#uint8_t buf[200];
#if (!con_m)
#return 0;
#len = EIBGetGroup_Src (con_m, sizeof (buf), buf, &src, &dest);
#if (pth_event_status (stop_m) == PTH_STATUS_OCCURRED)
#return -1;
#if (len == -1)
#{
#logger_m.errorStream() << "Read failed" << endlog;
#return 0;
#}
#if (len < 2)
#{
#logger_m.warnStream() << "Invalid Packet (too short)" << endlog;
#return 0;
#}
#if (buf[0] & 0x3 || (buf[1] & 0xC0) == 0xC0)
#{
#logger_m.warnStream() << "Unknown APDU from "<< src << " to " << dest << endlog;
#}
#else
#{
#if (logger_m.isDebugEnabled())
#{
#DbgStream dbg = logger_m.debugStream();
#switch (buf[1] & 0xC0)
#{
#case 0x00:
#dbg << "Read";
#break;
#case 0x40:
#dbg << "Response";
#break;
#case 0x80:
#dbg << "Write";
#break;
#}
#dbg << " from " << Object::WriteAddr(src) << " to " << Object::WriteGroupAddr(dest);
#if (buf[1] & 0xC0)
#{
#dbg << ": " << std::hex << std::setfill ('0') << std::setw (2);
#if (len == 2)
#dbg << (int)(buf[1] & 0x3F);
#else
#{
#for (uint8_t *p = buf+2; p < buf+len; p++)
#dbg << (int)*p << " ";
#}
#}
#dbg << std::dec << endlog;
#}
#if (listener_m)
#{
#switch (buf[1] & 0xC0)
#{
#case 0x00:
#listener_m->onRead(src, dest, buf, len);
#break;
#case 0x40:
#listener_m->onResponse(src, dest, buf, len);
#break;
#case 0x80:
#listener_m->onWrite(src, dest, buf, len);
#break;
#}
#}
#}
#return 1;
def isRunning(self):
"""
"""
return self._run
|
uwafsl/MissionPlanner
|
refs/heads/master
|
Lib/wave.py
|
53
|
"""Stuff to parse WAVE files.
Usage.
Reading WAVE files:
f = wave.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
When the setpos() and rewind() methods are not used, the seek()
method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' for linear samples)
getcompname() -- returns human-readable version of
compression type ('not compressed' linear samples)
getparams() -- returns a tuple consisting of all of the
above in the above order
getmarkers() -- returns None (for compatibility with the
aifc module)
getmark(id) -- raises an error since the mark does not
exist (for compatibility with the aifc module)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell() and the position given to setpos()
are compatible and have nothing to do with the actual position in the
file.
The close() method is called automatically when the class instance
is destroyed.
Writing WAVE files:
f = wave.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)
-- set all parameters at once
tell() -- return current position in output file
writeframesraw(data)
                -- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, perhaps possibly the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes('') or
close() to patch up the sizes in the header.
The close() method is called automatically when the class instance
is destroyed.
"""
import __builtin__
__all__ = ["open", "openfp", "Error"]
class Error(Exception):
pass
WAVE_FORMAT_PCM = 0x0001
_array_fmts = None, 'b', 'h', None, 'l'
# Determine endian-ness
import struct
if struct.pack("h", 1) == "\000\001":
big_endian = 1
else:
big_endian = 0
from chunk import Chunk
class Wave_read:
"""Variables used in this class:
These variables are available to the user though appropriate
methods of this class:
_file -- the open file with methods read(), close(), and seek()
set through the __init__() method
_nchannels -- the number of audio channels
available through the getnchannels() method
_nframes -- the number of audio frames
available through the getnframes() method
_sampwidth -- the number of bytes per audio sample
available through the getsampwidth() method
_framerate -- the sampling frequency
available through the getframerate() method
_comptype -- the AIFF-C compression type ('NONE' if AIFF)
available through the getcomptype() method
_compname -- the human-readable AIFF-C compression type
available through the getcomptype() method
_soundpos -- the position in the audio stream
available through the tell() method, set through the
setpos() method
These variables are used internally only:
_fmt_chunk_read -- 1 iff the FMT chunk has been read
_data_seek_needed -- 1 iff positioned correctly in audio
file for readframes()
_data_chunk -- instantiation of a chunk class for the DATA chunk
_framesize -- size of one frame in the file
"""
def initfp(self, file):
self._convert = None
self._soundpos = 0
self._file = Chunk(file, bigendian = 0)
if self._file.getname() != 'RIFF':
raise Error, 'file does not start with RIFF id'
if self._file.read(4) != 'WAVE':
raise Error, 'not a WAVE file'
self._fmt_chunk_read = 0
self._data_chunk = None
while 1:
self._data_seek_needed = 1
try:
chunk = Chunk(self._file, bigendian = 0)
except EOFError:
break
chunkname = chunk.getname()
if chunkname == 'fmt ':
self._read_fmt_chunk(chunk)
self._fmt_chunk_read = 1
elif chunkname == 'data':
if not self._fmt_chunk_read:
raise Error, 'data chunk before fmt chunk'
self._data_chunk = chunk
self._nframes = chunk.chunksize // self._framesize
self._data_seek_needed = 0
break
chunk.skip()
if not self._fmt_chunk_read or not self._data_chunk:
raise Error, 'fmt chunk and/or data chunk missing'
def __init__(self, f):
self._i_opened_the_file = None
if isinstance(f, basestring):
f = __builtin__.open(f, 'rb')
self._i_opened_the_file = f
# else, assume it is an open file object already
try:
self.initfp(f)
except:
if self._i_opened_the_file:
f.close()
raise
def __del__(self):
self.close()
#
# User visible methods.
#
def getfp(self):
return self._file
def rewind(self):
self._data_seek_needed = 1
self._soundpos = 0
def close(self):
if self._i_opened_the_file:
self._i_opened_the_file.close()
self._i_opened_the_file = None
self._file = None
def tell(self):
return self._soundpos
def getnchannels(self):
return self._nchannels
def getnframes(self):
return self._nframes
def getsampwidth(self):
return self._sampwidth
def getframerate(self):
return self._framerate
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
def getparams(self):
return self.getnchannels(), self.getsampwidth(), \
self.getframerate(), self.getnframes(), \
self.getcomptype(), self.getcompname()
def getmarkers(self):
return None
def getmark(self, id):
raise Error, 'no marks'
def setpos(self, pos):
if pos < 0 or pos > self._nframes:
raise Error, 'position not in range'
self._soundpos = pos
self._data_seek_needed = 1
def readframes(self, nframes):
if self._data_seek_needed:
self._data_chunk.seek(0, 0)
pos = self._soundpos * self._framesize
if pos:
self._data_chunk.seek(pos, 0)
self._data_seek_needed = 0
if nframes == 0:
return ''
if self._sampwidth > 1 and big_endian:
# unfortunately the fromfile() method does not take
# something that only looks like a file object, so
# we have to reach into the innards of the chunk object
import array
chunk = self._data_chunk
data = array.array(_array_fmts[self._sampwidth])
nitems = nframes * self._nchannels
if nitems * self._sampwidth > chunk.chunksize - chunk.size_read:
nitems = (chunk.chunksize - chunk.size_read) / self._sampwidth
data.fromfile(chunk.file.file, nitems)
# "tell" data chunk how much was read
chunk.size_read = chunk.size_read + nitems * self._sampwidth
# do the same for the outermost chunk
chunk = chunk.file
chunk.size_read = chunk.size_read + nitems * self._sampwidth
data.byteswap()
data = data.tostring()
else:
data = self._data_chunk.read(nframes * self._framesize)
if self._convert and data:
data = self._convert(data)
self._soundpos = self._soundpos + len(data) // (self._nchannels * self._sampwidth)
return data
#
# Internal methods.
#
def _read_fmt_chunk(self, chunk):
wFormatTag, self._nchannels, self._framerate, dwAvgBytesPerSec, wBlockAlign = struct.unpack('<hhllh', chunk.read(14))
if wFormatTag == WAVE_FORMAT_PCM:
sampwidth = struct.unpack('<h', chunk.read(2))[0]
self._sampwidth = (sampwidth + 7) // 8
else:
raise Error, 'unknown format: %r' % (wFormatTag,)
self._framesize = self._nchannels * self._sampwidth
self._comptype = 'NONE'
self._compname = 'not compressed'
class Wave_write:
"""Variables used in this class:
These variables are user settable through appropriate methods
of this class:
_file -- the open file with methods write(), close(), tell(), seek()
set through the __init__() method
_comptype -- the AIFF-C compression type ('NONE' in AIFF)
set through the setcomptype() or setparams() method
_compname -- the human-readable AIFF-C compression type
set through the setcomptype() or setparams() method
_nchannels -- the number of audio channels
set through the setnchannels() or setparams() method
_sampwidth -- the number of bytes per audio sample
set through the setsampwidth() or setparams() method
_framerate -- the sampling frequency
set through the setframerate() or setparams() method
_nframes -- the number of audio frames written to the header
set through the setnframes() or setparams() method
These variables are used internally only:
_datalength -- the size of the audio samples written to the header
_nframeswritten -- the number of frames actually written
_datawritten -- the size of the audio samples actually written
"""
def __init__(self, f):
self._i_opened_the_file = None
if isinstance(f, basestring):
f = __builtin__.open(f, 'wb')
self._i_opened_the_file = f
try:
self.initfp(f)
except:
if self._i_opened_the_file:
f.close()
raise
def initfp(self, file):
self._file = file
self._convert = None
self._nchannels = 0
self._sampwidth = 0
self._framerate = 0
self._nframes = 0
self._nframeswritten = 0
self._datawritten = 0
self._datalength = 0
self._headerwritten = False
def __del__(self):
self.close()
#
# User visible methods.
#
def setnchannels(self, nchannels):
if self._datawritten:
raise Error, 'cannot change parameters after starting to write'
if nchannels < 1:
raise Error, 'bad # of channels'
self._nchannels = nchannels
def getnchannels(self):
if not self._nchannels:
raise Error, 'number of channels not set'
return self._nchannels
def setsampwidth(self, sampwidth):
if self._datawritten:
raise Error, 'cannot change parameters after starting to write'
if sampwidth < 1 or sampwidth > 4:
raise Error, 'bad sample width'
self._sampwidth = sampwidth
def getsampwidth(self):
if not self._sampwidth:
raise Error, 'sample width not set'
return self._sampwidth
def setframerate(self, framerate):
if self._datawritten:
raise Error, 'cannot change parameters after starting to write'
if framerate <= 0:
raise Error, 'bad frame rate'
self._framerate = framerate
def getframerate(self):
if not self._framerate:
raise Error, 'frame rate not set'
return self._framerate
def setnframes(self, nframes):
if self._datawritten:
raise Error, 'cannot change parameters after starting to write'
self._nframes = nframes
def getnframes(self):
return self._nframeswritten
def setcomptype(self, comptype, compname):
if self._datawritten:
raise Error, 'cannot change parameters after starting to write'
if comptype not in ('NONE',):
raise Error, 'unsupported compression type'
self._comptype = comptype
self._compname = compname
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
def setparams(self, params):
nchannels, sampwidth, framerate, nframes, comptype, compname = params
if self._datawritten:
raise Error, 'cannot change parameters after starting to write'
self.setnchannels(nchannels)
self.setsampwidth(sampwidth)
self.setframerate(framerate)
self.setnframes(nframes)
self.setcomptype(comptype, compname)
def getparams(self):
if not self._nchannels or not self._sampwidth or not self._framerate:
raise Error, 'not all parameters set'
return self._nchannels, self._sampwidth, self._framerate, \
self._nframes, self._comptype, self._compname
def setmark(self, id, pos, name):
raise Error, 'setmark() not supported'
def getmark(self, id):
raise Error, 'no marks'
def getmarkers(self):
return None
def tell(self):
return self._nframeswritten
def writeframesraw(self, data):
self._ensure_header_written(len(data))
nframes = len(data) // (self._sampwidth * self._nchannels)
if self._convert:
data = self._convert(data)
if self._sampwidth > 1 and big_endian:
import array
data = array.array(_array_fmts[self._sampwidth], data)
data.byteswap()
data.tofile(self._file)
self._datawritten = self._datawritten + len(data) * self._sampwidth
else:
self._file.write(data)
self._datawritten = self._datawritten + len(data)
self._nframeswritten = self._nframeswritten + nframes
def writeframes(self, data):
self.writeframesraw(data)
if self._datalength != self._datawritten:
self._patchheader()
def close(self):
if self._file:
self._ensure_header_written(0)
if self._datalength != self._datawritten:
self._patchheader()
self._file.flush()
self._file = None
if self._i_opened_the_file:
self._i_opened_the_file.close()
self._i_opened_the_file = None
#
# Internal methods.
#
def _ensure_header_written(self, datasize):
if not self._headerwritten:
if not self._nchannels:
raise Error, '# channels not specified'
if not self._sampwidth:
raise Error, 'sample width not specified'
if not self._framerate:
raise Error, 'sampling rate not specified'
self._write_header(datasize)
def _write_header(self, initlength):
assert not self._headerwritten
self._file.write('RIFF')
if not self._nframes:
self._nframes = initlength / (self._nchannels * self._sampwidth)
self._datalength = self._nframes * self._nchannels * self._sampwidth
self._form_length_pos = self._file.tell()
self._file.write(struct.pack('<l4s4slhhllhh4s',
36 + self._datalength, 'WAVE', 'fmt ', 16,
WAVE_FORMAT_PCM, self._nchannels, self._framerate,
self._nchannels * self._framerate * self._sampwidth,
self._nchannels * self._sampwidth,
self._sampwidth * 8, 'data'))
self._data_length_pos = self._file.tell()
self._file.write(struct.pack('<l', self._datalength))
self._headerwritten = True
def _patchheader(self):
assert self._headerwritten
if self._datawritten == self._datalength:
return
curpos = self._file.tell()
self._file.seek(self._form_length_pos, 0)
self._file.write(struct.pack('<l', 36 + self._datawritten))
self._file.seek(self._data_length_pos, 0)
self._file.write(struct.pack('<l', self._datawritten))
self._file.seek(curpos, 0)
self._datalength = self._datawritten
def open(f, mode=None):
if mode is None:
if hasattr(f, 'mode'):
mode = f.mode
else:
mode = 'rb'
if mode in ('r', 'rb'):
return Wave_read(f)
elif mode in ('w', 'wb'):
return Wave_write(f)
else:
raise Error, "mode must be 'r', 'rb', 'w', or 'wb'"
openfp = open # B/W compatibility
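# --- Illustrative usage (not part of the original module) ---
# A minimal Python 2 round trip using the module-level open() defined above:
# it writes one second of 16-bit mono silence and reads the header back.
if __name__ == '__main__':
    w = open('example.wav', 'wb')
    w.setnchannels(1)
    w.setsampwidth(2)
    w.setframerate(8000)
    w.writeframes('\x00\x00' * 8000)
    w.close()
    r = open('example.wav', 'rb')
    print(r.getparams())
    r.close()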
|
1fabunicorn/resnix
|
refs/heads/master
|
mainmodules/IO.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import print_function
"""
Input Output function
reads from saves, writes plaintext on 'unlock'
Copyright (C) 2017, Nova Trauben, noah.trauben@gmail.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from mainmodules import ignorethis
def write_plaintext(cyphertext, key, file_to_create):
with open(cyphertext, 'r') as cyphertext_file:
cyphertext_file = cyphertext_file.readlines(0)
for x in cyphertext_file:
cyphertext_file = x
cyphertext_file = cyphertext_file.split()
cyphertext_num = []
for num in cyphertext_file:
cyphertext_num.append(int(num))
plaintext = ignorethis.decrypt(cyphertext_num, key)
with open(file_to_create, 'w+') as f:
print(plaintext, file=f)
def print_intro():
spaces, bars, under_scores, new_lines = " ", "▇", "_", "\n"
print((115 * bars) + (new_lines) + (2 * spaces) + (3 * bars) + (6 * spaces) +
under_scores + (7 * spaces) + (2 * under_scores) + (spaces * 5) + (2 *
under_scores) + (29 * spaces) + (6 * under_scores) + (48 * spaces) +
(3 * bars) + new_lines + (2 * spaces) + (3 * bars) + (5 * spaces)
+ '| | / /__ / /________ ____ ___ ___ /_ __/___' + (45 * spaces)
+ '▇▇▇\n ▇▇▇ | | /| / / _ \\/ / ___/ __ \\/ __ `__ \\/ _ \\ '
'/ / / __ \\ ____ ___________ _ _______ __ ▇▇▇\n ▇▇▇ '
'| |/ |/ / __/ / /__/ /_/ / / / / / / __/ / / / /_/ / / __ \\/'
' ____/ ___// | / / _/ |/ / ▇▇▇\n ▇▇▇ |__/|__/\\___/_/\\'
'___/\\____/_/ /_/ /_/\\___/ /_/ \\____/ / /_/ / __/ \\__ '
'\\/ |/ // / | / ▇▇▇\n ▇▇▇' + (35 * spaces) + '_'
+ (30 * spaces) + ' / _, _/ /___ ___/ / /| // / / | ▇▇▇\n'
' ▇▇▇ _|_ _ _ |_ _ |._ _|__ _ |_ _ | _ '
' /_/ |_/_____//____/_/ |_/___//_/|_| ▇▇▇\n ▇▇▇ '
' |_\\/|_)(/_ | |(/_||_) |(_)| | |(/_||_)' + (55 * spaces)
+ '▇▇▇\n ▇▇▇ / | | |'
+ (57 * spaces) + '▇▇▇\n ▇▇▇' + (106 * spaces) + (3 * bars)
+ (new_lines) + (2 * spaces) + (3 * bars) + (106 * spaces) + (3 * bars)
+ (new_lines) + (115 * bars))
def pw():
try:
password = raw_input("password: ")
except NameError:
        password = input("password: ")
if password:
return password
else:
return None
|
deepinsight/Deformable-ConvNets
|
refs/heads/master
|
lib/utils/PrefetchingIter.py
|
2
|
# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2016 by Contributors
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by Yuwen Xiong
# --------------------------------------------------------
import mxnet as mx
from mxnet.io import DataDesc, DataBatch
import threading
class PrefetchingIter(mx.io.DataIter):
"""Base class for prefetching iterators. Takes one or more DataIters (
or any class with "reset" and "next" methods) and combine them with
prefetching. For example:
Parameters
----------
iters : DataIter or list of DataIter
one or more DataIters (or any class with "reset" and "next" methods)
rename_data : None or list of dict
i-th element is a renaming map for i-th iter, in the form of
{'original_name' : 'new_name'}. Should have one entry for each entry
in iter[i].provide_data
rename_label : None or list of dict
Similar to rename_data
Examples
--------
iter = PrefetchingIter([NDArrayIter({'data': X1}), NDArrayIter({'data': X2})],
rename_data=[{'data': 'data1'}, {'data': 'data2'}])
"""
def __init__(self, iters, rename_data=None, rename_label=None):
super(PrefetchingIter, self).__init__()
if not isinstance(iters, list):
iters = [iters]
self.n_iter = len(iters)
        assert self.n_iter == 1, "Our prefetching iter only supports 1 DataIter"
self.iters = iters
self.rename_data = rename_data
self.rename_label = rename_label
self.batch_size = len(self.provide_data) * self.provide_data[0][0][1][0]
self.data_ready = [threading.Event() for i in range(self.n_iter)]
self.data_taken = [threading.Event() for i in range(self.n_iter)]
for e in self.data_taken:
e.set()
self.started = True
self.current_batch = [None for _ in range(self.n_iter)]
self.next_batch = [None for _ in range(self.n_iter)]
def prefetch_func(self, i):
"""Thread entry"""
while True:
self.data_taken[i].wait()
if not self.started:
break
try:
self.next_batch[i] = self.iters[i].next()
except StopIteration:
self.next_batch[i] = None
self.data_taken[i].clear()
self.data_ready[i].set()
self.prefetch_threads = [threading.Thread(target=prefetch_func, args=[self, i]) \
for i in range(self.n_iter)]
for thread in self.prefetch_threads:
thread.setDaemon(True)
thread.start()
def __del__(self):
self.started = False
for e in self.data_taken:
e.set()
for thread in self.prefetch_threads:
thread.join()
@property
def provide_data(self):
"""The name and shape of data provided by this iterator"""
if self.rename_data is None:
return sum([i.provide_data for i in self.iters], [])
else:
return sum([[
DataDesc(r[x.name], x.shape, x.dtype)
if isinstance(x, DataDesc) else DataDesc(*x)
for x in i.provide_data
] for r, i in zip(self.rename_data, self.iters)], [])
@property
def provide_label(self):
"""The name and shape of label provided by this iterator"""
if self.rename_label is None:
return sum([i.provide_label for i in self.iters], [])
else:
return sum([[
DataDesc(r[x.name], x.shape, x.dtype)
if isinstance(x, DataDesc) else DataDesc(*x)
for x in i.provide_label
] for r, i in zip(self.rename_label, self.iters)], [])
def reset(self):
for e in self.data_ready:
e.wait()
for i in self.iters:
i.reset()
for e in self.data_ready:
e.clear()
for e in self.data_taken:
e.set()
def iter_next(self):
for e in self.data_ready:
e.wait()
if self.next_batch[0] is None:
return False
else:
self.current_batch = self.next_batch[0]
for e in self.data_ready:
e.clear()
for e in self.data_taken:
e.set()
return True
def next(self):
if self.iter_next():
return self.current_batch
else:
raise StopIteration
def getdata(self):
return self.current_batch.data
def getlabel(self):
return self.current_batch.label
def getindex(self):
return self.current_batch.index
def getpad(self):
return self.current_batch.pad
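# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): wrap a tiny stand-in
# DataIter in PrefetchingIter so the next batch is produced by a background
# thread while the current one is consumed. The stand-in returns nested
# provide_data/provide_label lists (one list of DataDesc per device), which is
# what the batch_size computation in __init__ expects. Assumes mxnet is
# installed; all names below are illustrative only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    class _ToyIter(mx.io.DataIter):
        def __init__(self, n=3):
            super(_ToyIter, self).__init__()
            self._n = n
            self._i = 0
        @property
        def provide_data(self):
            return [[DataDesc('data', (2, 4))]]
        @property
        def provide_label(self):
            return [[DataDesc('label', (2,))]]
        def reset(self):
            self._i = 0
        def next(self):
            if self._i >= self._n:
                raise StopIteration
            self._i += 1
            return DataBatch(data=[mx.nd.zeros((2, 4))], label=[mx.nd.zeros((2,))],
                             pad=0, index=None,
                             provide_data=self.provide_data,
                             provide_label=self.provide_label)
    prefetcher = PrefetchingIter(_ToyIter())
    for batch in prefetcher:
        print(batch.data[0].shape)  # -> (2, 4), printed three times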
|
rahuldhote/odoo
|
refs/heads/8.0
|
addons/calendar/contacts.py
|
389
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class calendar_contacts(osv.osv):
_name = 'calendar.contacts'
_columns = {
'user_id': fields.many2one('res.users','Me'),
'partner_id': fields.many2one('res.partner','Employee',required=True, domain=[]),
'active':fields.boolean('active'),
}
_defaults = {
'user_id': lambda self, cr, uid, ctx: uid,
'active' : True,
}
|
lparsons/bioconda-recipes
|
refs/heads/master
|
recipes/wtforms-components/setup.py
|
63
|
"""
WTForms-Components
------------------
Additional fields, validators and widgets for WTForms.
"""
from setuptools import setup
import os
import re
import sys
HERE = os.path.dirname(os.path.abspath(__file__))
PY3 = sys.version_info[0] == 3
def get_version():
filename = os.path.join(HERE, 'wtforms_components', '__init__.py')
with open(filename) as f:
contents = f.read()
pattern = r"^__version__ = '(.*?)'$"
return re.search(pattern, contents, re.MULTILINE).group(1)
extras_require = {
'test': [
'pytest>=2.2.3',
'flexmock>=0.9.7',
'WTForms-Test>=0.1.1',
'flake8>=2.4.0',
'isort>=4.2.2',
],
'color': ['colour>=0.0.4'],
'ipaddress': ['ipaddr'] if not PY3 else [],
'timezone': ['python-dateutil'],
}
# Add all optional dependencies to testing requirements.
for name, requirements in extras_require.items():
if name != 'test':
extras_require['test'] += requirements
setup(
name='WTForms-Components',
version=get_version(),
url='https://github.com/kvesteri/wtforms-components',
license='BSD',
author='Konsta Vesterinen',
author_email='konsta@fastmonkeys.com',
description='Additional fields, validators and widgets for WTForms.',
long_description=__doc__,
packages=[
'wtforms_components',
'wtforms_components.fields'
],
zip_safe=False,
include_package_data=True,
platforms='any',
dependency_links=[
# 5.6b1 only supports python 3.x / pending release
'git+git://github.com/daviddrysdale/python-phonenumbers.git@python3'
'#egg=phonenumbers3k-5.6b1',
],
install_requires=[
'WTForms>=1.0.4',
'six>=1.4.1',
'validators>=0.5.0',
'intervals>=0.6.0'
],
extras_require=extras_require,
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
mangadul/WhatsAppy
|
refs/heads/master
|
yowsup/layers/axolotl/store/sqlite/liteaxolotlstore.py
|
17
|
from axolotl.state.axolotlstore import AxolotlStore
from .liteidentitykeystore import LiteIdentityKeyStore
from .liteprekeystore import LitePreKeyStore
from .litesessionstore import LiteSessionStore
from .litesignedprekeystore import LiteSignedPreKeyStore
from .litesenderkeystore import LiteSenderKeyStore
import sqlite3
class LiteAxolotlStore(AxolotlStore):
def __init__(self, db):
conn = sqlite3.connect(db, check_same_thread=False)
conn.text_factory = bytes
self.identityKeyStore = LiteIdentityKeyStore(conn)
self.preKeyStore = LitePreKeyStore(conn)
self.signedPreKeyStore = LiteSignedPreKeyStore(conn)
self.sessionStore = LiteSessionStore(conn)
self.senderKeyStore = LiteSenderKeyStore(conn)
def getIdentityKeyPair(self):
return self.identityKeyStore.getIdentityKeyPair()
def storeLocalData(self, registrationId, identityKeyPair):
self.identityKeyStore.storeLocalData(registrationId, identityKeyPair)
def getLocalRegistrationId(self):
return self.identityKeyStore.getLocalRegistrationId()
def saveIdentity(self, recepientId, identityKey):
self.identityKeyStore.saveIdentity(recepientId, identityKey)
def isTrustedIdentity(self, recepientId, identityKey):
return self.identityKeyStore.isTrustedIdentity(recepientId, identityKey)
def loadPreKey(self, preKeyId):
return self.preKeyStore.loadPreKey(preKeyId)
def loadPreKeys(self):
return self.preKeyStore.loadPendingPreKeys()
def storePreKey(self, preKeyId, preKeyRecord):
self.preKeyStore.storePreKey(preKeyId, preKeyRecord)
def containsPreKey(self, preKeyId):
return self.preKeyStore.containsPreKey(preKeyId)
def removePreKey(self, preKeyId):
self.preKeyStore.removePreKey(preKeyId)
def loadSession(self, recepientId, deviceId):
return self.sessionStore.loadSession(recepientId, deviceId)
def getSubDeviceSessions(self, recepientId):
return self.sessionStore.getSubDeviceSessions(recepientId)
def storeSession(self, recepientId, deviceId, sessionRecord):
self.sessionStore.storeSession(recepientId, deviceId, sessionRecord)
def containsSession(self, recepientId, deviceId):
return self.sessionStore.containsSession(recepientId, deviceId)
def deleteSession(self, recepientId, deviceId):
self.sessionStore.deleteSession(recepientId, deviceId)
def deleteAllSessions(self, recepientId):
self.sessionStore.deleteAllSessions(recepientId)
def loadSignedPreKey(self, signedPreKeyId):
return self.signedPreKeyStore.loadSignedPreKey(signedPreKeyId)
def loadSignedPreKeys(self):
return self.signedPreKeyStore.loadSignedPreKeys()
def storeSignedPreKey(self, signedPreKeyId, signedPreKeyRecord):
self.signedPreKeyStore.storeSignedPreKey(signedPreKeyId, signedPreKeyRecord)
def containsSignedPreKey(self, signedPreKeyId):
return self.signedPreKeyStore.containsSignedPreKey(signedPreKeyId)
def removeSignedPreKey(self, signedPreKeyId):
self.signedPreKeyStore.removeSignedPreKey(signedPreKeyId)
def loadSenderKey(self, senderKeyName):
return self.senderKeyStore.loadSenderKey(senderKeyName)
def storeSenderKey(self, senderKeyName, senderKeyRecord):
self.senderKeyStore.storeSenderKey(senderKeyName, senderKeyRecord)
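# Hedged usage sketch (not part of the original module): LiteAxolotlStore is a
# facade that shares a single sqlite connection between the per-concern stores
# above. Whether the backing tables already exist is up to the Lite*Store
# constructors, so this is kept as an illustrative comment only.
#
#   store = LiteAxolotlStore("axolotl.db")
#   store.storeLocalData(registrationId, identityKeyPair)
#   print(store.getLocalRegistrationId())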
|
akosyakov/intellij-community
|
refs/heads/master
|
python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/fixes/fix_methodattrs.py
|
326
|
"""Fix bound method attributes (method.im_? -> method.__?__).
"""
# Author: Christian Heimes
# Local imports
from .. import fixer_base
from ..fixer_util import Name
MAP = {
"im_func" : "__func__",
"im_self" : "__self__",
"im_class" : "__self__.__class__"
}
class FixMethodattrs(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* >
"""
def transform(self, node, results):
attr = results["attr"][0]
new = unicode(MAP[attr.value])
attr.replace(Name(new, prefix=attr.prefix))
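# Hedged illustration (not part of the original fixer): when this fixer is run
# through 2to3 (assuming an installation that ships it under the name
# "methodattrs"), the PATTERN above rewrites the legacy bound-method
# attributes, e.g.:
#
#   $ 2to3 -f methodattrs -w example.py
#   -    func = handler.im_func
#   +    func = handler.__func__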
|
shreyasp/erpnext
|
refs/heads/develop
|
erpnext/stock/dashboard/__init__.py
|
12133432
| |
kennethdecker/MagnePlane
|
refs/heads/master
|
src/__init__.py
|
12133432
| |
JioCloud/nova_test_latest
|
refs/heads/master
|
nova/tests/unit/api/openstack/compute/test_plugins/__init__.py
|
12133432
| |
neopenx/Dragon
|
refs/heads/master
|
examples/Seg-FCN/voc-fcn16s/test.py
|
1
|
# --------------------------------------------------------
# Seg-FCN for Dragon
# Copyright (c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
""" Test a FCN-16s(PASCAL VOC) network """
import dragon.vm.caffe as caffe
import score
import numpy as np
weights = 'snapshot/train_iter_44000.caffemodel'
if __name__ == '__main__':
# init
caffe.set_mode_gpu()
caffe.set_device(0)
solver = caffe.SGDSolver('solver.prototxt')
solver.net.copy_from(weights)
# scoring
val = np.loadtxt('../data/seg11valid.txt', dtype=str)
score.seg_tests(solver, 'seg', val)
|
Tudorvr/metagoofil
|
refs/heads/master
|
hachoir_core/stream/__init__.py
|
95
|
from hachoir_core.endian import BIG_ENDIAN, LITTLE_ENDIAN
from hachoir_core.stream.stream import StreamError
from hachoir_core.stream.input import (
InputStreamError,
InputStream, InputIOStream, StringInputStream,
InputSubStream, InputFieldStream,
FragmentedStream, ConcatStream)
from hachoir_core.stream.input_helper import FileInputStream, guessStreamCharset
from hachoir_core.stream.output import (OutputStreamError,
FileOutputStream, StringOutputStream, OutputStream)
|
OpenNetworkingFoundation/ONFOpenTransport
|
refs/heads/develop
|
RI/flask_server/tapi_server/models/tapi_common_getserviceinterfacepointlist_output_sip.py
|
4
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_common_administrative_state import TapiCommonAdministrativeState # noqa: F401,E501
from tapi_server.models.tapi_common_capacity import TapiCommonCapacity # noqa: F401,E501
from tapi_server.models.tapi_common_layer_protocol_name import TapiCommonLayerProtocolName # noqa: F401,E501
from tapi_server.models.tapi_common_lifecycle_state import TapiCommonLifecycleState # noqa: F401,E501
from tapi_server.models.tapi_common_name_and_value import TapiCommonNameAndValue # noqa: F401,E501
from tapi_server.models.tapi_common_operational_state import TapiCommonOperationalState # noqa: F401,E501
from tapi_server.models.tapi_common_service_interface_point import TapiCommonServiceInterfacePoint # noqa: F401,E501
from tapi_server.models.tapi_photonic_media_media_channel_service_interface_point_spec import TapiPhotonicMediaMediaChannelServiceInterfacePointSpec # noqa: F401,E501
from tapi_server.models.tapi_photonic_media_otsi_service_interface_point_spec import TapiPhotonicMediaOtsiServiceInterfacePointSpec # noqa: F401,E501
from tapi_server.models.tapi_photonic_media_sip_augmentation1 import TapiPhotonicMediaSipAugmentation1 # noqa: F401,E501
from tapi_server.models.tapi_photonic_media_sip_augmentation2 import TapiPhotonicMediaSipAugmentation2 # noqa: F401,E501
from tapi_server import util
class TapiCommonGetserviceinterfacepointlistOutputSip(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, operational_state=None, lifecycle_state=None, administrative_state=None, available_capacity=None, total_potential_capacity=None, name=None, uuid=None, supported_layer_protocol_qualifier=None, layer_protocol_name=None, media_channel_service_interface_point_spec=None, otsi_service_interface_point_spec=None): # noqa: E501
"""TapiCommonGetserviceinterfacepointlistOutputSip - a model defined in OpenAPI
:param operational_state: The operational_state of this TapiCommonGetserviceinterfacepointlistOutputSip. # noqa: E501
:type operational_state: TapiCommonOperationalState
:param lifecycle_state: The lifecycle_state of this TapiCommonGetserviceinterfacepointlistOutputSip. # noqa: E501
:type lifecycle_state: TapiCommonLifecycleState
:param administrative_state: The administrative_state of this TapiCommonGetserviceinterfacepointlistOutputSip. # noqa: E501
:type administrative_state: TapiCommonAdministrativeState
:param available_capacity: The available_capacity of this TapiCommonGetserviceinterfacepointlistOutputSip. # noqa: E501
:type available_capacity: TapiCommonCapacity
:param total_potential_capacity: The total_potential_capacity of this TapiCommonGetserviceinterfacepointlistOutputSip. # noqa: E501
:type total_potential_capacity: TapiCommonCapacity
:param name: The name of this TapiCommonGetserviceinterfacepointlistOutputSip. # noqa: E501
:type name: List[TapiCommonNameAndValue]
:param uuid: The uuid of this TapiCommonGetserviceinterfacepointlistOutputSip. # noqa: E501
:type uuid: str
:param supported_layer_protocol_qualifier: The supported_layer_protocol_qualifier of this TapiCommonGetserviceinterfacepointlistOutputSip. # noqa: E501
:type supported_layer_protocol_qualifier: List[str]
:param layer_protocol_name: The layer_protocol_name of this TapiCommonGetserviceinterfacepointlistOutputSip. # noqa: E501
:type layer_protocol_name: TapiCommonLayerProtocolName
:param media_channel_service_interface_point_spec: The media_channel_service_interface_point_spec of this TapiCommonGetserviceinterfacepointlistOutputSip. # noqa: E501
:type media_channel_service_interface_point_spec: TapiPhotonicMediaMediaChannelServiceInterfacePointSpec
:param otsi_service_interface_point_spec: The otsi_service_interface_point_spec of this TapiCommonGetserviceinterfacepointlistOutputSip. # noqa: E501
:type otsi_service_interface_point_spec: TapiPhotonicMediaOtsiServiceInterfacePointSpec
"""
self.openapi_types = {
'operational_state': TapiCommonOperationalState,
'lifecycle_state': TapiCommonLifecycleState,
'administrative_state': TapiCommonAdministrativeState,
'available_capacity': TapiCommonCapacity,
'total_potential_capacity': TapiCommonCapacity,
'name': List[TapiCommonNameAndValue],
'uuid': str,
'supported_layer_protocol_qualifier': List[str],
'layer_protocol_name': TapiCommonLayerProtocolName,
'media_channel_service_interface_point_spec': TapiPhotonicMediaMediaChannelServiceInterfacePointSpec,
'otsi_service_interface_point_spec': TapiPhotonicMediaOtsiServiceInterfacePointSpec
}
self.attribute_map = {
'operational_state': 'operational-state',
'lifecycle_state': 'lifecycle-state',
'administrative_state': 'administrative-state',
'available_capacity': 'available-capacity',
'total_potential_capacity': 'total-potential-capacity',
'name': 'name',
'uuid': 'uuid',
'supported_layer_protocol_qualifier': 'supported-layer-protocol-qualifier',
'layer_protocol_name': 'layer-protocol-name',
'media_channel_service_interface_point_spec': 'media-channel-service-interface-point-spec',
'otsi_service_interface_point_spec': 'otsi-service-interface-point-spec'
}
self._operational_state = operational_state
self._lifecycle_state = lifecycle_state
self._administrative_state = administrative_state
self._available_capacity = available_capacity
self._total_potential_capacity = total_potential_capacity
self._name = name
self._uuid = uuid
self._supported_layer_protocol_qualifier = supported_layer_protocol_qualifier
self._layer_protocol_name = layer_protocol_name
self._media_channel_service_interface_point_spec = media_channel_service_interface_point_spec
self._otsi_service_interface_point_spec = otsi_service_interface_point_spec
@classmethod
def from_dict(cls, dikt) -> 'TapiCommonGetserviceinterfacepointlistOutputSip':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The tapi.common.getserviceinterfacepointlist.output.Sip of this TapiCommonGetserviceinterfacepointlistOutputSip. # noqa: E501
:rtype: TapiCommonGetserviceinterfacepointlistOutputSip
"""
return util.deserialize_model(dikt, cls)
@property
def operational_state(self):
"""Gets the operational_state of this TapiCommonGetserviceinterfacepointlistOutputSip.
:return: The operational_state of this TapiCommonGetserviceinterfacepointlistOutputSip.
:rtype: TapiCommonOperationalState
"""
return self._operational_state
@operational_state.setter
def operational_state(self, operational_state):
"""Sets the operational_state of this TapiCommonGetserviceinterfacepointlistOutputSip.
:param operational_state: The operational_state of this TapiCommonGetserviceinterfacepointlistOutputSip.
:type operational_state: TapiCommonOperationalState
"""
self._operational_state = operational_state
@property
def lifecycle_state(self):
"""Gets the lifecycle_state of this TapiCommonGetserviceinterfacepointlistOutputSip.
:return: The lifecycle_state of this TapiCommonGetserviceinterfacepointlistOutputSip.
:rtype: TapiCommonLifecycleState
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""Sets the lifecycle_state of this TapiCommonGetserviceinterfacepointlistOutputSip.
:param lifecycle_state: The lifecycle_state of this TapiCommonGetserviceinterfacepointlistOutputSip.
:type lifecycle_state: TapiCommonLifecycleState
"""
self._lifecycle_state = lifecycle_state
@property
def administrative_state(self):
"""Gets the administrative_state of this TapiCommonGetserviceinterfacepointlistOutputSip.
:return: The administrative_state of this TapiCommonGetserviceinterfacepointlistOutputSip.
:rtype: TapiCommonAdministrativeState
"""
return self._administrative_state
@administrative_state.setter
def administrative_state(self, administrative_state):
"""Sets the administrative_state of this TapiCommonGetserviceinterfacepointlistOutputSip.
:param administrative_state: The administrative_state of this TapiCommonGetserviceinterfacepointlistOutputSip.
:type administrative_state: TapiCommonAdministrativeState
"""
self._administrative_state = administrative_state
@property
def available_capacity(self):
"""Gets the available_capacity of this TapiCommonGetserviceinterfacepointlistOutputSip.
:return: The available_capacity of this TapiCommonGetserviceinterfacepointlistOutputSip.
:rtype: TapiCommonCapacity
"""
return self._available_capacity
@available_capacity.setter
def available_capacity(self, available_capacity):
"""Sets the available_capacity of this TapiCommonGetserviceinterfacepointlistOutputSip.
:param available_capacity: The available_capacity of this TapiCommonGetserviceinterfacepointlistOutputSip.
:type available_capacity: TapiCommonCapacity
"""
self._available_capacity = available_capacity
@property
def total_potential_capacity(self):
"""Gets the total_potential_capacity of this TapiCommonGetserviceinterfacepointlistOutputSip.
:return: The total_potential_capacity of this TapiCommonGetserviceinterfacepointlistOutputSip.
:rtype: TapiCommonCapacity
"""
return self._total_potential_capacity
@total_potential_capacity.setter
def total_potential_capacity(self, total_potential_capacity):
"""Sets the total_potential_capacity of this TapiCommonGetserviceinterfacepointlistOutputSip.
:param total_potential_capacity: The total_potential_capacity of this TapiCommonGetserviceinterfacepointlistOutputSip.
:type total_potential_capacity: TapiCommonCapacity
"""
self._total_potential_capacity = total_potential_capacity
@property
def name(self):
"""Gets the name of this TapiCommonGetserviceinterfacepointlistOutputSip.
List of names. A property of an entity with a value that is unique in some namespace but may change during the life of the entity. A name carries no semantics with respect to the purpose of the entity. # noqa: E501
:return: The name of this TapiCommonGetserviceinterfacepointlistOutputSip.
:rtype: List[TapiCommonNameAndValue]
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this TapiCommonGetserviceinterfacepointlistOutputSip.
List of names. A property of an entity with a value that is unique in some namespace but may change during the life of the entity. A name carries no semantics with respect to the purpose of the entity. # noqa: E501
:param name: The name of this TapiCommonGetserviceinterfacepointlistOutputSip.
:type name: List[TapiCommonNameAndValue]
"""
self._name = name
@property
def uuid(self):
"""Gets the uuid of this TapiCommonGetserviceinterfacepointlistOutputSip.
UUID: An identifier that is universally unique within an identifier space, where the identifier space is itself globally unique, and immutable. An UUID carries no semantics with respect to the purpose or state of the entity. UUID here uses string representation as defined in RFC 4122. The canonical representation uses lowercase characters. Pattern: [0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-' + '[0-9a-fA-F]{4}-[0-9a-fA-F]{12} Example of a UUID in string representation: f81d4fae-7dec-11d0-a765-00a0c91e6bf6 # noqa: E501
:return: The uuid of this TapiCommonGetserviceinterfacepointlistOutputSip.
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""Sets the uuid of this TapiCommonGetserviceinterfacepointlistOutputSip.
UUID: An identifier that is universally unique within an identifier space, where the identifier space is itself globally unique, and immutable. An UUID carries no semantics with respect to the purpose or state of the entity. UUID here uses string representation as defined in RFC 4122. The canonical representation uses lowercase characters. Pattern: [0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-' + '[0-9a-fA-F]{4}-[0-9a-fA-F]{12} Example of a UUID in string representation: f81d4fae-7dec-11d0-a765-00a0c91e6bf6 # noqa: E501
:param uuid: The uuid of this TapiCommonGetserviceinterfacepointlistOutputSip.
:type uuid: str
"""
self._uuid = uuid
@property
def supported_layer_protocol_qualifier(self):
"""Gets the supported_layer_protocol_qualifier of this TapiCommonGetserviceinterfacepointlistOutputSip.
none # noqa: E501
:return: The supported_layer_protocol_qualifier of this TapiCommonGetserviceinterfacepointlistOutputSip.
:rtype: List[str]
"""
return self._supported_layer_protocol_qualifier
@supported_layer_protocol_qualifier.setter
def supported_layer_protocol_qualifier(self, supported_layer_protocol_qualifier):
"""Sets the supported_layer_protocol_qualifier of this TapiCommonGetserviceinterfacepointlistOutputSip.
none # noqa: E501
:param supported_layer_protocol_qualifier: The supported_layer_protocol_qualifier of this TapiCommonGetserviceinterfacepointlistOutputSip.
:type supported_layer_protocol_qualifier: List[str]
"""
self._supported_layer_protocol_qualifier = supported_layer_protocol_qualifier
@property
def layer_protocol_name(self):
"""Gets the layer_protocol_name of this TapiCommonGetserviceinterfacepointlistOutputSip.
:return: The layer_protocol_name of this TapiCommonGetserviceinterfacepointlistOutputSip.
:rtype: TapiCommonLayerProtocolName
"""
return self._layer_protocol_name
@layer_protocol_name.setter
def layer_protocol_name(self, layer_protocol_name):
"""Sets the layer_protocol_name of this TapiCommonGetserviceinterfacepointlistOutputSip.
:param layer_protocol_name: The layer_protocol_name of this TapiCommonGetserviceinterfacepointlistOutputSip.
:type layer_protocol_name: TapiCommonLayerProtocolName
"""
self._layer_protocol_name = layer_protocol_name
@property
def media_channel_service_interface_point_spec(self):
"""Gets the media_channel_service_interface_point_spec of this TapiCommonGetserviceinterfacepointlistOutputSip.
:return: The media_channel_service_interface_point_spec of this TapiCommonGetserviceinterfacepointlistOutputSip.
:rtype: TapiPhotonicMediaMediaChannelServiceInterfacePointSpec
"""
return self._media_channel_service_interface_point_spec
@media_channel_service_interface_point_spec.setter
def media_channel_service_interface_point_spec(self, media_channel_service_interface_point_spec):
"""Sets the media_channel_service_interface_point_spec of this TapiCommonGetserviceinterfacepointlistOutputSip.
:param media_channel_service_interface_point_spec: The media_channel_service_interface_point_spec of this TapiCommonGetserviceinterfacepointlistOutputSip.
:type media_channel_service_interface_point_spec: TapiPhotonicMediaMediaChannelServiceInterfacePointSpec
"""
self._media_channel_service_interface_point_spec = media_channel_service_interface_point_spec
@property
def otsi_service_interface_point_spec(self):
"""Gets the otsi_service_interface_point_spec of this TapiCommonGetserviceinterfacepointlistOutputSip.
:return: The otsi_service_interface_point_spec of this TapiCommonGetserviceinterfacepointlistOutputSip.
:rtype: TapiPhotonicMediaOtsiServiceInterfacePointSpec
"""
return self._otsi_service_interface_point_spec
@otsi_service_interface_point_spec.setter
def otsi_service_interface_point_spec(self, otsi_service_interface_point_spec):
"""Sets the otsi_service_interface_point_spec of this TapiCommonGetserviceinterfacepointlistOutputSip.
:param otsi_service_interface_point_spec: The otsi_service_interface_point_spec of this TapiCommonGetserviceinterfacepointlistOutputSip.
:type otsi_service_interface_point_spec: TapiPhotonicMediaOtsiServiceInterfacePointSpec
"""
self._otsi_service_interface_point_spec = otsi_service_interface_point_spec
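# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated file): from_dict() relies on
# util.deserialize_model to map the RESTCONF-style hyphenated keys declared in
# attribute_map onto this model. Assumes the tapi_server package of the Flask
# reference implementation is importable; the values are illustrative only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    sip = TapiCommonGetserviceinterfacepointlistOutputSip.from_dict({
        "uuid": "f81d4fae-7dec-11d0-a765-00a0c91e6bf6",
        "supported-layer-protocol-qualifier": ["PHOTONIC_LAYER_QUALIFIER_OTSi"],
    })
    print(sip.uuid)
    print(sip.supported_layer_protocol_qualifier)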
|
smart-make/zxing
|
refs/heads/master
|
cpp/scons/scons-local-2.0.0.final.0/SCons/Platform/posix.py
|
34
|
"""SCons.Platform.posix
Platform-specific initialization for POSIX (Linux, UNIX, etc.) systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/posix.py 5023 2010/06/14 22:05:46 scons"
import errno
import os
import os.path
import subprocess
import sys
import select
import SCons.Util
from SCons.Platform import TempFileMunge
exitvalmap = {
2 : 127,
13 : 126,
}
def escape(arg):
"escape shell special characters"
slash = '\\'
special = '"$()'
arg = arg.replace(slash, slash+slash)
for c in special:
arg = arg.replace(c, slash+c)
return '"' + arg + '"'
def exec_system(l, env):
stat = os.system(' '.join(l))
if stat & 0xff:
return stat | 0x80
return stat >> 8
def exec_spawnvpe(l, env):
stat = os.spawnvpe(os.P_WAIT, l[0], l, env)
# os.spawnvpe() returns the actual exit code, not the encoding
# returned by os.waitpid() or os.system().
return stat
def exec_fork(l, env):
pid = os.fork()
if not pid:
# Child process.
exitval = 127
try:
os.execvpe(l[0], l, env)
except OSError, e:
exitval = exitvalmap.get(e[0], e[0])
sys.stderr.write("scons: %s: %s\n" % (l[0], e[1]))
os._exit(exitval)
else:
# Parent process.
pid, stat = os.waitpid(pid, 0)
if stat & 0xff:
return stat | 0x80
return stat >> 8
def _get_env_command(sh, escape, cmd, args, env):
s = ' '.join(args)
if env:
l = ['env', '-'] + \
[escape(t[0])+'='+escape(t[1]) for t in env.items()] + \
[sh, '-c', escape(s)]
s = ' '.join(l)
return s
def env_spawn(sh, escape, cmd, args, env):
return exec_system([_get_env_command( sh, escape, cmd, args, env)], env)
def spawnvpe_spawn(sh, escape, cmd, args, env):
return exec_spawnvpe([sh, '-c', ' '.join(args)], env)
def fork_spawn(sh, escape, cmd, args, env):
return exec_fork([sh, '-c', ' '.join(args)], env)
def process_cmd_output(cmd_stdout, cmd_stderr, stdout, stderr):
stdout_eof = stderr_eof = 0
while not (stdout_eof and stderr_eof):
try:
(i,o,e) = select.select([cmd_stdout, cmd_stderr], [], [])
if cmd_stdout in i:
str = cmd_stdout.read()
if len(str) == 0:
stdout_eof = 1
elif stdout is not None:
stdout.write(str)
if cmd_stderr in i:
str = cmd_stderr.read()
if len(str) == 0:
#sys.__stderr__.write( "stderr_eof=1\n" )
stderr_eof = 1
else:
#sys.__stderr__.write( "str(stderr) = %s\n" % str )
stderr.write(str)
except select.error, (_errno, _strerror):
if _errno != errno.EINTR:
raise
def exec_popen3(l, env, stdout, stderr):
proc = subprocess.Popen(' '.join(l),
stdout=stdout,
stderr=stderr,
shell=True)
stat = proc.wait()
if stat & 0xff:
return stat | 0x80
return stat >> 8
def exec_piped_fork(l, env, stdout, stderr):
# spawn using fork / exec and providing a pipe for the command's
# stdout / stderr stream
if stdout != stderr:
(rFdOut, wFdOut) = os.pipe()
(rFdErr, wFdErr) = os.pipe()
else:
(rFdOut, wFdOut) = os.pipe()
rFdErr = rFdOut
wFdErr = wFdOut
# do the fork
pid = os.fork()
if not pid:
# Child process
os.close( rFdOut )
if rFdOut != rFdErr:
os.close( rFdErr )
os.dup2( wFdOut, 1 ) # is there some symbolic way to do that ?
os.dup2( wFdErr, 2 )
os.close( wFdOut )
if stdout != stderr:
os.close( wFdErr )
exitval = 127
try:
os.execvpe(l[0], l, env)
except OSError, e:
exitval = exitvalmap.get(e[0], e[0])
stderr.write("scons: %s: %s\n" % (l[0], e[1]))
os._exit(exitval)
else:
# Parent process
pid, stat = os.waitpid(pid, 0)
os.close( wFdOut )
if stdout != stderr:
os.close( wFdErr )
childOut = os.fdopen( rFdOut )
if stdout != stderr:
childErr = os.fdopen( rFdErr )
else:
childErr = childOut
process_cmd_output(childOut, childErr, stdout, stderr)
os.close( rFdOut )
if stdout != stderr:
os.close( rFdErr )
if stat & 0xff:
return stat | 0x80
return stat >> 8
def piped_env_spawn(sh, escape, cmd, args, env, stdout, stderr):
# spawn using Popen3 combined with the env command
# the command name and the command's stdout is written to stdout
# the command's stderr is written to stderr
return exec_popen3([_get_env_command(sh, escape, cmd, args, env)],
env, stdout, stderr)
def piped_fork_spawn(sh, escape, cmd, args, env, stdout, stderr):
# spawn using fork / exec and providing a pipe for the command's
# stdout / stderr stream
return exec_piped_fork([sh, '-c', ' '.join(args)],
env, stdout, stderr)
def generate(env):
# If os.spawnvpe() exists, we use it to spawn commands. Otherwise
# if the env utility exists, we use os.system() to spawn commands,
# finally we fall back on os.fork()/os.exec().
#
    # os.spawnvpe() is preferred because it is the most efficient. But
    # for Python versions without it, os.system() is preferred because it
# is claimed that it works better with threads (i.e. -j) and is more
# efficient than forking Python.
#
# NB: Other people on the scons-users mailing list have claimed that
# os.fork()/os.exec() works better than os.system(). There may just
# not be a default that works best for all users.
if 'spawnvpe' in os.__dict__:
spawn = spawnvpe_spawn
elif env.Detect('env'):
spawn = env_spawn
else:
spawn = fork_spawn
if env.Detect('env'):
pspawn = piped_env_spawn
else:
pspawn = piped_fork_spawn
if 'ENV' not in env:
env['ENV'] = {}
env['ENV']['PATH'] = '/usr/local/bin:/opt/bin:/bin:/usr/bin'
env['OBJPREFIX'] = ''
env['OBJSUFFIX'] = '.o'
env['SHOBJPREFIX'] = '$OBJPREFIX'
env['SHOBJSUFFIX'] = '$OBJSUFFIX'
env['PROGPREFIX'] = ''
env['PROGSUFFIX'] = ''
env['LIBPREFIX'] = 'lib'
env['LIBSUFFIX'] = '.a'
env['SHLIBPREFIX'] = '$LIBPREFIX'
env['SHLIBSUFFIX'] = '.so'
env['LIBPREFIXES'] = [ '$LIBPREFIX' ]
env['LIBSUFFIXES'] = [ '$LIBSUFFIX', '$SHLIBSUFFIX' ]
env['PSPAWN'] = pspawn
env['SPAWN'] = spawn
env['SHELL'] = 'sh'
env['ESCAPE'] = escape
env['TEMPFILE'] = TempFileMunge
env['TEMPFILEPREFIX'] = '@'
    # Based on LINUX: ARG_MAX=131072 - 3000 for environment expansion
    # Note: specific platforms might raise or lower this value
env['MAXLINELENGTH'] = 128072
# This platform supports RPATH specifications.
env['__RPATH'] = '$_RPATH'
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
abaditsegay/arangodb
|
refs/heads/devel
|
3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_pep352.py
|
51
|
import unittest
import __builtin__
import exceptions
import warnings
from test.test_support import run_unittest
import os
from platform import system as platform_system
def ignore_message_warning():
"""Ignore the DeprecationWarning for BaseException.message."""
warnings.resetwarnings()
warnings.filterwarnings("ignore", "BaseException.message",
DeprecationWarning)
class ExceptionClassTests(unittest.TestCase):
"""Tests for anything relating to exception objects themselves (e.g.,
inheritance hierarchy)"""
def test_builtins_new_style(self):
self.failUnless(issubclass(Exception, object))
def verify_instance_interface(self, ins):
with warnings.catch_warnings():
ignore_message_warning()
for attr in ("args", "message", "__str__", "__repr__",
"__getitem__"):
self.failUnless(hasattr(ins, attr),
"%s missing %s attribute" %
(ins.__class__.__name__, attr))
def test_inheritance(self):
# Make sure the inheritance hierarchy matches the documentation
exc_set = set(x for x in dir(exceptions) if not x.startswith('_'))
inheritance_tree = open(os.path.join(os.path.split(__file__)[0],
'exception_hierarchy.txt'))
try:
superclass_name = inheritance_tree.readline().rstrip()
try:
last_exc = getattr(__builtin__, superclass_name)
except AttributeError:
self.fail("base class %s not a built-in" % superclass_name)
self.failUnless(superclass_name in exc_set)
exc_set.discard(superclass_name)
superclasses = [] # Loop will insert base exception
last_depth = 0
for exc_line in inheritance_tree:
exc_line = exc_line.rstrip()
depth = exc_line.rindex('-')
exc_name = exc_line[depth+2:] # Slice past space
if '(' in exc_name:
paren_index = exc_name.index('(')
platform_name = exc_name[paren_index+1:-1]
exc_name = exc_name[:paren_index-1] # Slice off space
if platform_system() != platform_name:
exc_set.discard(exc_name)
continue
if '[' in exc_name:
left_bracket = exc_name.index('[')
exc_name = exc_name[:left_bracket-1] # cover space
try:
exc = getattr(__builtin__, exc_name)
except AttributeError:
self.fail("%s not a built-in exception" % exc_name)
if last_depth < depth:
superclasses.append((last_depth, last_exc))
elif last_depth > depth:
while superclasses[-1][0] >= depth:
superclasses.pop()
self.failUnless(issubclass(exc, superclasses[-1][1]),
"%s is not a subclass of %s" % (exc.__name__,
superclasses[-1][1].__name__))
try: # Some exceptions require arguments; just skip them
self.verify_instance_interface(exc())
except TypeError:
pass
self.failUnless(exc_name in exc_set)
exc_set.discard(exc_name)
last_exc = exc
last_depth = depth
finally:
inheritance_tree.close()
self.failUnlessEqual(len(exc_set), 0, "%s not accounted for" % exc_set)
interface_tests = ("length", "args", "message", "str", "unicode", "repr",
"indexing")
def interface_test_driver(self, results):
for test_name, (given, expected) in zip(self.interface_tests, results):
self.failUnlessEqual(given, expected, "%s: %s != %s" % (test_name,
given, expected))
def test_interface_single_arg(self):
# Make sure interface works properly when given a single argument
arg = "spam"
exc = Exception(arg)
with warnings.catch_warnings():
ignore_message_warning()
results = ([len(exc.args), 1], [exc.args[0], arg],
[exc.message, arg],
[str(exc), str(arg)], [unicode(exc), unicode(arg)],
[repr(exc), exc.__class__.__name__ + repr(exc.args)], [exc[0],
arg])
self.interface_test_driver(results)
def test_interface_multi_arg(self):
# Make sure interface correct when multiple arguments given
arg_count = 3
args = tuple(range(arg_count))
exc = Exception(*args)
with warnings.catch_warnings():
ignore_message_warning()
results = ([len(exc.args), arg_count], [exc.args, args],
[exc.message, ''], [str(exc), str(args)],
[unicode(exc), unicode(args)],
[repr(exc), exc.__class__.__name__ + repr(exc.args)],
[exc[-1], args[-1]])
self.interface_test_driver(results)
def test_interface_no_arg(self):
# Make sure that with no args that interface is correct
exc = Exception()
with warnings.catch_warnings():
ignore_message_warning()
results = ([len(exc.args), 0], [exc.args, tuple()],
[exc.message, ''],
[str(exc), ''], [unicode(exc), u''],
[repr(exc), exc.__class__.__name__ + '()'], [True, True])
self.interface_test_driver(results)
def test_message_deprecation(self):
# As of Python 2.6, BaseException.message is deprecated.
with warnings.catch_warnings():
warnings.resetwarnings()
warnings.filterwarnings('error')
try:
BaseException().message
except DeprecationWarning:
pass
else:
self.fail("BaseException.message not deprecated")
exc = BaseException()
try:
exc.message = ''
except DeprecationWarning:
pass
else:
self.fail("BaseException.message assignment not deprecated")
class UsageTests(unittest.TestCase):
"""Test usage of exceptions"""
def raise_fails(self, object_):
"""Make sure that raising 'object_' triggers a TypeError."""
try:
raise object_
except TypeError:
return # What is expected.
self.fail("TypeError expected for raising %s" % type(object_))
def catch_fails(self, object_):
"""Catching 'object_' should raise a TypeError."""
try:
try:
raise StandardError
except object_:
pass
except TypeError:
pass
except StandardError:
self.fail("TypeError expected when catching %s" % type(object_))
try:
try:
raise StandardError
except (object_,):
pass
except TypeError:
return
except StandardError:
self.fail("TypeError expected when catching %s as specified in a "
"tuple" % type(object_))
def test_raise_classic(self):
# Raising a classic class is okay (for now).
class ClassicClass:
pass
try:
raise ClassicClass
except ClassicClass:
pass
except:
self.fail("unable to raise classic class")
try:
raise ClassicClass()
except ClassicClass:
pass
except:
self.fail("unable to raise class class instance")
def test_raise_new_style_non_exception(self):
# You cannot raise a new-style class that does not inherit from
# BaseException; the ability was not possible until BaseException's
# introduction so no need to support new-style objects that do not
# inherit from it.
class NewStyleClass(object):
pass
self.raise_fails(NewStyleClass)
self.raise_fails(NewStyleClass())
def test_raise_string(self):
# Raising a string raises TypeError.
self.raise_fails("spam")
def test_catch_string(self):
# Catching a string should trigger a DeprecationWarning.
with warnings.catch_warnings():
warnings.resetwarnings()
warnings.filterwarnings("error")
str_exc = "spam"
try:
try:
raise StandardError
except str_exc:
pass
except DeprecationWarning:
pass
except StandardError:
self.fail("catching a string exception did not raise "
"DeprecationWarning")
# Make sure that even if the string exception is listed in a tuple
# that a warning is raised.
try:
try:
raise StandardError
except (AssertionError, str_exc):
pass
except DeprecationWarning:
pass
except StandardError:
self.fail("catching a string exception specified in a tuple did "
"not raise DeprecationWarning")
def test_main():
run_unittest(ExceptionClassTests, UsageTests)
if __name__ == '__main__':
test_main()
|
raphael0202/spaCy
|
refs/heads/master
|
spacy/tests/en/test_contractions.py
|
4
|
# coding: utf-8
"""Test that tokens are created correctly for contractions."""
from __future__ import unicode_literals
import pytest
def test_tokenizer_handles_basic_contraction(en_tokenizer):
text = "don't giggle"
tokens = en_tokenizer(text)
assert len(tokens) == 3
assert tokens[1].text == "n't"
text = "i said don't!"
tokens = en_tokenizer(text)
assert len(tokens) == 5
assert tokens[4].text == "!"
@pytest.mark.parametrize('text', ["`ain't", '''"isn't''', "can't!"])
def test_tokenizer_handles_basic_contraction_punct(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize('text_poss,text', [("Robin's", "Robin"), ("Alexis's", "Alexis")])
def test_tokenizer_handles_poss_contraction(en_tokenizer, text_poss, text):
tokens = en_tokenizer(text_poss)
assert len(tokens) == 2
assert tokens[0].text == text
assert tokens[1].text == "'s"
@pytest.mark.parametrize('text', ["schools'", "Alexis'"])
def test_tokenizer_splits_trailing_apos(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 2
assert tokens[0].text == text.split("'")[0]
assert tokens[1].text == "'"
@pytest.mark.parametrize('text', ["'em", "nothin'", "ol'"])
def test_tokenizer_doesnt_split_apos_exc(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 1
assert tokens[0].text == text
@pytest.mark.parametrize('text', ["we'll", "You'll", "there'll"])
def test_tokenizer_handles_ll_contraction(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 2
assert tokens[0].text == text.split("'")[0]
assert tokens[1].text == "'ll"
assert tokens[1].lemma_ == "will"
@pytest.mark.parametrize('text_lower,text_title', [("can't", "Can't"), ("ain't", "Ain't")])
def test_tokenizer_handles_capitalization(en_tokenizer, text_lower, text_title):
tokens_lower = en_tokenizer(text_lower)
tokens_title = en_tokenizer(text_title)
assert tokens_title[0].text == tokens_lower[0].text.title()
assert tokens_lower[0].text == tokens_title[0].text.lower()
assert tokens_lower[1].text == tokens_title[1].text
@pytest.mark.parametrize('pron', ["I", "You", "He", "She", "It", "We", "They"])
@pytest.mark.parametrize('contraction', ["'ll", "'d"])
def test_tokenizer_keeps_title_case(en_tokenizer, pron, contraction):
tokens = en_tokenizer(pron + contraction)
assert tokens[0].text == pron
assert tokens[1].text == contraction
@pytest.mark.parametrize('exc', ["Ill", "ill", "Hell", "hell", "Well", "well"])
def test_tokenizer_excludes_ambiguous(en_tokenizer, exc):
tokens = en_tokenizer(exc)
assert len(tokens) == 1
@pytest.mark.parametrize('wo_punct,w_punct', [("We've", "``We've"), ("couldn't", "couldn't)")])
def test_tokenizer_splits_defined_punct(en_tokenizer, wo_punct, w_punct):
tokens = en_tokenizer(wo_punct)
assert len(tokens) == 2
tokens = en_tokenizer(w_punct)
assert len(tokens) == 3
|