repo_name
stringlengths
5
100
ref
stringlengths
12
67
path
stringlengths
4
244
copies
stringlengths
1
8
content
stringlengths
0
1.05M
algobunny/fanocean.ff
refs/heads/master
handlers_oauth.py
1
import json
import hashlib
import pytumblr
import urlparse
import code
import oauth2 as oauth

from ignore_me import pokedex
from handlers_custom import SessionHandler
from models import Author

# Tumblr OAuth 1.0a endpoints.
request_token_url = 'http://www.tumblr.com/oauth/request_token'
authorize_url = 'http://www.tumblr.com/oauth/authorize'
access_token_url = 'http://www.tumblr.com/oauth/access_token'


class TumblrLogin(SessionHandler):
    """Step 1 of the OAuth 1.0a dance: fetch a request token and redirect
    the user to Tumblr's authorization page."""

    def get(self):
        consumer_key = pokedex['tumblr']['rokku']
        consumer_secret = pokedex['tumblr']['himitsu']
        consumer = oauth.Consumer(consumer_key, consumer_secret)
        client = oauth.Client(consumer)
        resp, content = client.request(request_token_url, "POST")

        # Tumblr answers with a urlencoded body: oauth_token=...&oauth_token_secret=...
        request_token = urlparse.parse_qs(content)
        token = request_token['oauth_token'][0]
        secret = request_token['oauth_token_secret'][0]

        # Stash the request token in the session so TumblrCallback can
        # upgrade it to an access token.
        self.session['oauth_token'] = token
        self.session['token_secret'] = secret

        go_to = '%s?oauth_token=%s' % (authorize_url, token)
        self.redirect(go_to)


class TumblrCallback(SessionHandler):
    """Step 2: exchange the verified request token for an access token,
    then create or update the matching Author entity."""

    def get(self):
        oauth_token = self.request.get('oauth_token')
        oauth_verify = self.request.get('oauth_verifier')

        # Rebuild the request token saved by TumblrLogin and attach the
        # verifier Tumblr sent back.
        token = oauth.Token(self.session.get('oauth_token'),
                            self.session.get('token_secret'))
        token.set_verifier(oauth_verify)

        consumer_key = pokedex['tumblr']['rokku']
        consumer_secret = pokedex['tumblr']['himitsu']
        consumer = oauth.Consumer(consumer_key, consumer_secret)
        client = oauth.Client(consumer, token)
        resp, content = client.request(access_token_url, "POST")

        access_token = urlparse.parse_qs(content)
        user_token = access_token['oauth_token'][0]
        user_secret = access_token['oauth_token_secret'][0]

        # Ask the API who this user is so we can key the Author record.
        new_client = pytumblr.TumblrRestClient(consumer_key, consumer_secret,
                                               user_token, user_secret)
        user_info = new_client.info()
        username = str(user_info['user']['name'])

        a = Author.query(Author.tumblr_username == username).get()
        if a is None:
            a = Author(tumblr_token=user_token,
                       tumblr_secret=user_secret,
                       tumblr_username=username)
        else:
            # BUG FIX: the original created a *second* Author entity with no
            # username here (losing the link to the account). Update the
            # existing entity's credentials in place instead.
            a.tumblr_token = user_token
            a.tumblr_secret = user_secret
        a.put()
        self.redirect('/')
LearnEra/LearnEraPlaftform
refs/heads/master
lms/djangoapps/shoppingcart/migrations/0006_auto__add_field_order_refunded_time__add_field_orderitem_refund_reques.py
58
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):
    """South schema migration: adds refund-tracking timestamps
    ('Order.refunded_time', 'OrderItem.refund_requested_time') to the
    shoppingcart tables.  Auto-generated; the `models` dict below is a
    frozen snapshot of the ORM at generation time — do not edit by hand.
    """

    def forwards(self, orm):
        # Adding field 'Order.refunded_time'
        db.add_column('shoppingcart_order', 'refunded_time',
                      self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'OrderItem.refund_requested_time'
        db.add_column('shoppingcart_orderitem', 'refund_requested_time',
                      self.gf('django.db.models.fields.DateTimeField')(null=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Order.refunded_time'
        db.delete_column('shoppingcart_order', 'refunded_time')

        # Deleting field 'OrderItem.refund_requested_time'
        db.delete_column('shoppingcart_orderitem', 'refund_requested_time')

    # Frozen ORM snapshot used by South when running this migration.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'shoppingcart.certificateitem': {
            'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
            'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
        },
        'shoppingcart.order': {
            'Meta': {'object_name': 'Order'},
            'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
            'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
            'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
            'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'refunded_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'shoppingcart.orderitem': {
            'Meta': {'object_name': 'OrderItem'},
            'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
            'fulfilled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
            'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
            'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'refund_requested_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'report_comments': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
            'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'shoppingcart.paidcourseregistration': {
            'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
            'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
        },
        'shoppingcart.paidcourseregistrationannotation': {
            'Meta': {'object_name': 'PaidCourseRegistrationAnnotation'},
            'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'course_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'student.courseenrollment': {
            'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        }
    }

    complete_apps = ['shoppingcart']
Distrotech/intellij-community
refs/heads/master
python/testData/selectWord/literal/after3.py
83
x = <selection>r"hello world again"</selection>
liuzheng712/webTeX
refs/heads/master
django_wsgi.py
1
#!/usr/bin/env python # coding: utf-8 __author__ = 'liuzheng' import os import sys reload(sys) sys.setdefaultencoding('utf8') os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webTeX.settings") from django.core.wsgi import get_wsgi_application application = get_wsgi_application()
semolex/astropylis
refs/heads/master
camera.py
1
# -*- coding: utf-8 -*-
# TODO: add formulas.
from decimal import Decimal, getcontext

# All Decimal arithmetic below is carried out with 3 significant digits.
getcontext().prec = 3


class Camera(object):
    """
    Simple class that represents user's camera for astronomy (CCD/CMOS etc).
    """

    def __init__(self, pixel_size, resolution, sensor_h=None, sensor_w=None,
                 megapixels=None):
        self.pixel_size = pixel_size
        self.resolution = resolution
        self.sensor_h = sensor_h
        self.sensor_w = sensor_w
        self.pixels_amount = resolution[0] * resolution[1]
        # An explicit (truthy) megapixel count wins; otherwise derive it
        # from the resolution.
        if megapixels:
            self.megapixels = megapixels
        else:
            self.megapixels = self.__guess_megapixels()

    def __guess_megapixels(self):
        """Derive megapixels from the pixel count (1 MP = 1e6 pixels)."""
        return Decimal(self.pixels_amount) / 1000000

    def get_arc_sec_per_pixel(self, focal_length, pixel_binning=1, numeric=False):
        """Angular sampling (arcsec/pixel) for the given focal length.

        Returns a Decimal when numeric=True, else a formatted string.
        """
        sampling = Decimal(self.pixel_size / focal_length) * Decimal(206.3) * pixel_binning
        return sampling if numeric else '{} asec/pix'.format(sampling)

    def get_focal_ratio(self, focal_length, aperture, pixel_binning=1, numeric=True):
        """Focal ratio for the given focal length and aperture.

        Returns a Decimal when numeric=True, else a formatted string.
        """
        numerator = Decimal(self.pixel_size * 206.3)
        sampling = self.get_arc_sec_per_pixel(focal_length=focal_length, numeric=True)
        denominator = Decimal(aperture * sampling)
        ratio = numerator / denominator * pixel_binning
        return ratio if numeric else 'focal ratio: {}'.format(ratio)

    def get_max_exposure(self, focal_length, crop_factor=1.0, numeric=False):
        """Longest untracked exposure via the "rule of 500".

        Returns a number when numeric=True, else a formatted string.
        """
        seconds = 500 / focal_length * crop_factor
        return seconds if numeric else '{} sec.'.format(seconds)
jeeftor/alfredToday
refs/heads/master
src/lib/pyasn1/compat/binary.py
172
from sys import version_info

# Python 2.6 introduced the builtin bin(); on older interpreters we supply
# a recursive shim producing the same '0b...' format.
if version_info[0:2] >= (2, 6):
    bin = bin  # modern interpreter: simply re-export the builtin
else:
    def bin(x):
        """Render *x* in binary with a '0b' prefix (pre-2.6 compat shim)."""
        if x > 1:
            # Peel off the low bit and recurse on the remaining high bits.
            return bin(x >> 1) + str(x & 1)
        return '0b' + str(x)
ljhljh235/AutoRest
refs/heads/master
src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/Http/auto_rest_http_infrastructure_test_service/operations/http_retry_operations.py
14
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.pipeline import ClientRawResponse

from .. import models


class HttpRetryOperations(object):
    """HttpRetryOperations operations.

    Each method hits a test endpoint that first answers with the named
    retryable status code and then 200 after the client retries.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.config = config

    def head408(
            self, custom_headers=None, raw=False, **operation_config):
        """Return 408 status code, then 200 after retry.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
        """
        # Construct URL
        url = '/http/retry/408'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.head(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    def put500(
            self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
        """Return 500 status code, then 200 after retry.

        :param boolean_value: Simple boolean value true
        :type boolean_value: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
        """
        # Construct URL
        url = '/http/retry/500'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        if boolean_value is not None:
            body_content = self._serialize.body(boolean_value, 'bool')
        else:
            body_content = None

        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    def patch500(
            self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
        """Return 500 status code, then 200 after retry.

        :param boolean_value: Simple boolean value true
        :type boolean_value: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
        """
        # Construct URL
        url = '/http/retry/500'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        if boolean_value is not None:
            body_content = self._serialize.body(boolean_value, 'bool')
        else:
            body_content = None

        # Construct and send request
        request = self._client.patch(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    def get502(
            self, custom_headers=None, raw=False, **operation_config):
        """Return 502 status code, then 200 after retry.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
        """
        # Construct URL
        url = '/http/retry/502'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    def post503(
            self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
        """Return 503 status code, then 200 after retry.

        :param boolean_value: Simple boolean value true
        :type boolean_value: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
        """
        # Construct URL
        url = '/http/retry/503'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        if boolean_value is not None:
            body_content = self._serialize.body(boolean_value, 'bool')
        else:
            body_content = None

        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    def delete503(
            self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
        """Return 503 status code, then 200 after retry.

        :param boolean_value: Simple boolean value true
        :type boolean_value: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
        """
        # Construct URL
        url = '/http/retry/503'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        if boolean_value is not None:
            body_content = self._serialize.body(boolean_value, 'bool')
        else:
            body_content = None

        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    def put504(
            self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
        """Return 504 status code, then 200 after retry.

        :param boolean_value: Simple boolean value true
        :type boolean_value: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
        """
        # Construct URL
        url = '/http/retry/504'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        if boolean_value is not None:
            body_content = self._serialize.body(boolean_value, 'bool')
        else:
            body_content = None

        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    def patch504(
            self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
        """Return 504 status code, then 200 after retry.

        :param boolean_value: Simple boolean value true
        :type boolean_value: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
        """
        # Construct URL
        url = '/http/retry/504'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        if boolean_value is not None:
            body_content = self._serialize.body(boolean_value, 'bool')
        else:
            body_content = None

        # Construct and send request
        request = self._client.patch(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
googlearchive/titan
refs/heads/master
titan/tasks/handlers.py
1
#!/usr/bin/env python # Copyright 2013 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Handlers for Titan Tasks.""" import webapp2 from titan import tasks from titan.common import handlers # SECURITY NOTE: These handlers default to "login: required" in # titan/tasks/handlers.yaml. That means they are accessible to the world, # but they rely on the hashed task manager key to be useful. # Be careful not to introduce security vulnerabilities here. # # For users of Titan Tasks, you may want to wrap these handlers in an # application-specific WSGI router which performs app-level security checks. 
class TaskManagerHandler(handlers.BaseHandler):
    """Handlers for TaskManager.

    GET: serialize the TaskManager identified by 'key' (and optional
    'group') as JSON.  400 if no key, 404 if no such task manager.
    """

    def get(self):
        # 'key' is required; 'group' falls back to the default task group.
        key = self.request.get('key')
        group = self.request.get('group', tasks.DEFAULT_GROUP)
        if not key:
            self.abort(400)
        task_manager = tasks.TaskManager(key=key, group=group)
        if not task_manager.exists:
            self.abort(404)
        self.write_json_response(task_manager.serialize(full=True))


class TaskManagerSubscribeHandler(handlers.BaseHandler):
    """Handlers for TaskManager.subscribe.

    POST: subscribe 'client_id' to updates of the TaskManager identified
    by 'key'/'group'.  400 on missing params, 404 if not found.
    """

    def post(self):
        key = self.request.get('key')
        group = self.request.get('group', tasks.DEFAULT_GROUP)
        client_id = self.request.get('client_id')
        if not key or not client_id:
            self.abort(400)
        task_manager = tasks.TaskManager(key=key, group=group)
        if not task_manager.exists:
            self.abort(404)
        task_manager.subscribe(client_id)


# URL routes -> handler classes (see SECURITY NOTE above the handlers).
ROUTES = (
    ('/_titan/tasks/taskmanager', TaskManagerHandler),
    ('/_titan/tasks/taskmanager/subscribe', TaskManagerSubscribeHandler),
)
application = webapp2.WSGIApplication(ROUTES, debug=False)
ga7g08/sympy
refs/heads/master
sympy/sets/contains.py
77
from __future__ import print_function, division

from sympy.core import Basic
from sympy.logic.boolalg import BooleanFunction


class Contains(BooleanFunction):
    """
    Asserts that x is an element of the set S

    Examples
    ========

    >>> from sympy import Symbol, Integer, S
    >>> from sympy.sets.contains import Contains
    >>> Contains(Integer(2), S.Integers)
    True
    >>> Contains(Integer(-2), S.Naturals)
    False
    >>> i = Symbol('i', integer=True)
    >>> Contains(i, S.Naturals)
    Contains(i, Naturals())

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Element_%28mathematics%29
    """

    @classmethod
    def eval(cls, x, S):
        # Local import to avoid a circular dependency with sympy.sets.sets.
        from sympy.sets.sets import Set

        # Both arguments must be sympy objects: x any Basic, S a Set.
        if not isinstance(x, Basic):
            raise TypeError
        if not isinstance(S, Set):
            raise TypeError

        ret = S.contains(x)
        # If membership resolved to anything other than an unevaluated
        # Contains, return it; falling through (returning None) keeps
        # this Contains expression unevaluated.
        if not isinstance(ret, Contains):
            return ret
popazerty/oe-alliance-core
refs/heads/2.4
meta-oe/recipes-devtools/python/python-process/process.py
14
import os
import signal


class Process(object):
    """Represents a process"""

    def __init__(self, pid):
        """Make a new Process object from /proc/<pid>/stat.

        Raises OSError/IOError if the process vanished, ValueError if the
        stat line cannot be parsed.
        """
        self.proc = "/proc/%d" % pid
        # Use a context manager so the handle is closed even if read() fails.
        with open(os.path.join(self.proc, "stat")) as f:
            stat = f.read().strip()
        # /proc/<pid>/stat looks like: "<pid> (<comm>) <state> <ppid> ...".
        # <comm> is parenthesised and may itself contain spaces, so naive
        # whitespace splitting is wrong; split on the *last* ')' instead.
        head, _, tail = stat.rpartition(")")
        pid_field, _, command = head.partition("(")
        fields = tail.split()
        self.pid = int(pid_field)
        self.command = command
        self.state = fields[0]
        try:
            self.parent_pid = int(fields[1])
        except (IndexError, ValueError):
            # No/unparsable ppid field: treat as parentless (pid 0).
            self.parent_pid = 0
        # Filled in by ProcessList's second pass.
        self.parent = None
        self.children = []

    def kill(self, sig=signal.SIGTERM):
        """Kill this process with SIGTERM by default"""
        os.kill(self.pid, sig)

    def __repr__(self):
        return "%r" % self.pid

    def getcwd(self):
        """Read the current directory of this process or None for can't"""
        try:
            return os.readlink(os.path.join(self.proc, "cwd"))
        except OSError:
            return None


class ProcessList(object):
    """Represents a list of processes"""

    def __init__(self):
        """Read /proc and fill up the process lists"""
        self.by_pid = {}
        self.by_command = {}
        for entry in os.listdir("/proc"):
            # Only numeric entries are process directories.
            if not entry.isdigit():
                continue
            try:
                process = Process(int(entry))
            except (IOError, OSError, ValueError):
                # Process exited between listdir() and open(), or its stat
                # line was unparsable -- skip it rather than abort the scan.
                continue
            self.by_pid[process.pid] = process
            self.by_command.setdefault(process.command, []).append(process)
        # Second pass: link parent/child once every pid has been collected.
        for process in self.by_pid.values():
            parent = self.by_pid.get(process.parent_pid)
            if parent is not None:
                parent.children.append(process)
                process.parent = parent

    def named(self, name):
        """Returns a list of processes with the given name"""
        return self.by_command.get(name, [])
calancha/DIRAC
refs/heads/rel-v6r12
ResourceStatusSystem/Utilities/ComponentSynchronizer.py
8
# $HeadURL: $ """ ComponentSynchronizer Module that reads ComponentMonitoringDB.compmon_Components table and copies it to a RSS-like family of status tables to make everything easier. """ from DIRAC import gConfig, gLogger, S_OK from DIRAC.FrameworkSystem.DB.ComponentMonitoringDB import ComponentMonitoringDB from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient __RCSID__ = '$Id: $' class ComponentSynchronizer: """ ComponentSynchronizer """ def __init__( self ): """ Constructor """ self.log = gLogger.getSubLogger( self.__class__.__name__ ) self.compoDB = ComponentMonitoringDB() self.rsClient = ResourceStatusClient() def sync( self ): """ sync. Reads from ComponentsMonitoringDB and prepares entries on RSS ComponentStatus table. """ #TODO: delete from RSS if not anymore on ComponentsMonitoringDB setup = gConfig.getValue( 'DIRAC/Setup') components = self.compoDB.getComponentsStatus( { 'Setup' : setup } ) if not components[ 'OK' ]: return components components = components[ 'Value' ][ 0 ][ setup ] for agentName, agentsList in components[ 'agent' ].iteritems(): for agentDict in agentsList: if agentDict[ 'Status' ] == 'Error': self.log.warn( '%(ComponentName)s %(Message)s' % agentDict ) continue res = self.rsClient.addIfNotThereStatusElement( 'Component', 'Status', name = agentName, statusType = agentDict[ 'Host' ], status = 'Unknown', elementType = 'Agent', reason = 'Synchronized', ) if not res[ 'OK' ]: return res for serviceName, servicesList in components[ 'service' ].iteritems(): for serviceDict in servicesList: if serviceDict[ 'Status' ] == 'Error': self.log.warn( '%(ComponentName)s %(Message)s' % serviceDict ) continue res = self.rsClient.addIfNotThereStatusElement( 'Component', 'Status', name = serviceName, statusType = '%(Host)s:%(Port)s' % serviceDict, status = 'Unknown', elementType = 'Service', reason = 'Synchronized', ) if not res[ 'OK' ]: return res return S_OK() 
#............................................................................... #EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
ShineFan/odoo
refs/heads/8.0
openerp/addons/test_uninstall/models.py
341
# -*- coding: utf-8 -*- import openerp from openerp.osv import fields from openerp.osv.orm import Model class test_uninstall_model(Model): """ This model uses different types of columns to make it possible to test the uninstall feature of OpenERP. """ _name = 'test_uninstall.model' _columns = { 'name': fields.char('Name'), 'ref': fields.many2one('res.users', string='User'), 'rel': fields.many2many('res.users', string='Users'), } _sql_constraints = [ ('name_uniq', 'unique (name)', 'Each name must be unique.') ] # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
AlericInglewood/3p-google-breakpad
refs/heads/singularity
src/tools/gyp/test/dependencies/gyptest-extra-targets.py
401
#!/usr/bin/env python # Copyright (c) 2009 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verify that dependencies don't pull unused targets into the build. """ import TestGyp test = TestGyp.TestGyp() test.run_gyp('extra_targets.gyp') # This should fail if it tries to build 'c_unused' since 'c/c.c' has a syntax # error and won't compile. test.build('extra_targets.gyp', test.ALL) test.pass_test()
Lektorium-LLC/edx-platform
refs/heads/master
lms/djangoapps/course_wiki/tests/test_access.py
19
""" Tests for wiki permissions """ from django.contrib.auth.models import Group from nose.plugins.attrib import attr from wiki.models import URLPath from course_wiki import settings from course_wiki.utils import course_wiki_slug, user_is_article_course_staff from course_wiki.views import get_or_create_root from courseware.tests.factories import InstructorFactory, StaffFactory from student.tests.factories import UserFactory from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory class TestWikiAccessBase(ModuleStoreTestCase): """Base class for testing wiki access.""" def setUp(self): super(TestWikiAccessBase, self).setUp() self.wiki = get_or_create_root() self.course_math101 = CourseFactory.create(org='org', number='math101', display_name='Course', metadata={'use_unique_wiki_id': 'false'}) self.course_math101_staff = self.create_staff_for_course(self.course_math101) wiki_math101 = self.create_urlpath(self.wiki, course_wiki_slug(self.course_math101)) wiki_math101_page = self.create_urlpath(wiki_math101, 'Child') wiki_math101_page_page = self.create_urlpath(wiki_math101_page, 'Grandchild') self.wiki_math101_pages = [wiki_math101, wiki_math101_page, wiki_math101_page_page] self.course_math101b = CourseFactory.create(org='org', number='math101b', display_name='Course', metadata={'use_unique_wiki_id': 'true'}) self.course_math101b_staff = self.create_staff_for_course(self.course_math101b) wiki_math101b = self.create_urlpath(self.wiki, course_wiki_slug(self.course_math101b)) wiki_math101b_page = self.create_urlpath(wiki_math101b, 'Child') wiki_math101b_page_page = self.create_urlpath(wiki_math101b_page, 'Grandchild') self.wiki_math101b_pages = [wiki_math101b, wiki_math101b_page, wiki_math101b_page_page] def create_urlpath(self, parent, slug): """Creates an article at /parent/slug and returns its URLPath""" return URLPath.create_article(parent, slug, title=slug) def create_staff_for_course(self, 
course): """Creates and returns users with instructor and staff access to course.""" return [ InstructorFactory(course_key=course.id), # Creates instructor_org/number/run role name StaffFactory(course_key=course.id), # Creates staff_org/number/run role name ] @attr(shard=1) class TestWikiAccess(TestWikiAccessBase): """Test wiki access for course staff.""" def setUp(self): super(TestWikiAccess, self).setUp() self.course_310b = CourseFactory.create(org='org', number='310b', display_name='Course') self.course_310b_staff = self.create_staff_for_course(self.course_310b) self.course_310b2 = CourseFactory.create(org='org', number='310b_', display_name='Course') self.course_310b2_staff = self.create_staff_for_course(self.course_310b2) self.wiki_310b = self.create_urlpath(self.wiki, course_wiki_slug(self.course_310b)) self.wiki_310b2 = self.create_urlpath(self.wiki, course_wiki_slug(self.course_310b2)) def test_no_one_is_root_wiki_staff(self): all_course_staff = self.course_math101_staff + self.course_310b_staff + self.course_310b2_staff for course_staff in all_course_staff: self.assertFalse(user_is_article_course_staff(course_staff, self.wiki.article)) def test_course_staff_is_course_wiki_staff(self): for page in self.wiki_math101_pages: for course_staff in self.course_math101_staff: self.assertTrue(user_is_article_course_staff(course_staff, page.article)) for page in self.wiki_math101b_pages: for course_staff in self.course_math101b_staff: self.assertTrue(user_is_article_course_staff(course_staff, page.article)) def test_settings(self): for page in self.wiki_math101_pages: for course_staff in self.course_math101_staff: self.assertTrue(settings.CAN_DELETE(page.article, course_staff)) self.assertTrue(settings.CAN_MODERATE(page.article, course_staff)) self.assertTrue(settings.CAN_CHANGE_PERMISSIONS(page.article, course_staff)) self.assertTrue(settings.CAN_ASSIGN(page.article, course_staff)) self.assertTrue(settings.CAN_ASSIGN_OWNER(page.article, course_staff)) for page in 
self.wiki_math101b_pages: for course_staff in self.course_math101b_staff: self.assertTrue(settings.CAN_DELETE(page.article, course_staff)) self.assertTrue(settings.CAN_MODERATE(page.article, course_staff)) self.assertTrue(settings.CAN_CHANGE_PERMISSIONS(page.article, course_staff)) self.assertTrue(settings.CAN_ASSIGN(page.article, course_staff)) self.assertTrue(settings.CAN_ASSIGN_OWNER(page.article, course_staff)) def test_other_course_staff_is_not_course_wiki_staff(self): for page in self.wiki_math101_pages: for course_staff in self.course_math101b_staff: self.assertFalse(user_is_article_course_staff(course_staff, page.article)) for page in self.wiki_math101_pages: for course_staff in self.course_310b_staff: self.assertFalse(user_is_article_course_staff(course_staff, page.article)) for course_staff in self.course_310b_staff: self.assertFalse(user_is_article_course_staff(course_staff, self.wiki_310b2.article)) for course_staff in self.course_310b2_staff: self.assertFalse(user_is_article_course_staff(course_staff, self.wiki_310b.article)) @attr(shard=1) class TestWikiAccessForStudent(TestWikiAccessBase): """Test access for students.""" def setUp(self): super(TestWikiAccessForStudent, self).setUp() self.student = UserFactory.create() def test_student_is_not_root_wiki_staff(self): self.assertFalse(user_is_article_course_staff(self.student, self.wiki.article)) def test_student_is_not_course_wiki_staff(self): for page in self.wiki_math101_pages: self.assertFalse(user_is_article_course_staff(self.student, page.article)) @attr(shard=1) class TestWikiAccessForNumericalCourseNumber(TestWikiAccessBase): """Test staff has access if course number is numerical and wiki slug has an underscore appended.""" def setUp(self): super(TestWikiAccessForNumericalCourseNumber, self).setUp() self.course_200 = CourseFactory.create(org='org', number='200', display_name='Course') self.course_200_staff = self.create_staff_for_course(self.course_200) wiki_200 = self.create_urlpath(self.wiki, 
course_wiki_slug(self.course_200)) wiki_200_page = self.create_urlpath(wiki_200, 'Child') wiki_200_page_page = self.create_urlpath(wiki_200_page, 'Grandchild') self.wiki_200_pages = [wiki_200, wiki_200_page, wiki_200_page_page] def test_course_staff_is_course_wiki_staff_for_numerical_course_number(self): for page in self.wiki_200_pages: for course_staff in self.course_200_staff: self.assertTrue(user_is_article_course_staff(course_staff, page.article)) @attr(shard=1) class TestWikiAccessForOldFormatCourseStaffGroups(TestWikiAccessBase): """Test staff has access if course group has old format.""" def setUp(self): super(TestWikiAccessForOldFormatCourseStaffGroups, self).setUp() self.course_math101c = CourseFactory.create(org='org', number='math101c', display_name='Course') Group.objects.get_or_create(name='instructor_math101c') self.course_math101c_staff = self.create_staff_for_course(self.course_math101c) wiki_math101c = self.create_urlpath(self.wiki, course_wiki_slug(self.course_math101c)) wiki_math101c_page = self.create_urlpath(wiki_math101c, 'Child') wiki_math101c_page_page = self.create_urlpath(wiki_math101c_page, 'Grandchild') self.wiki_math101c_pages = [wiki_math101c, wiki_math101c_page, wiki_math101c_page_page] def test_course_staff_is_course_wiki_staff(self): for page in self.wiki_math101c_pages: for course_staff in self.course_math101c_staff: self.assertTrue(user_is_article_course_staff(course_staff, page.article))
alphagov/notify-api
refs/heads/master
migrations/versions/50_added_sender_id.py
1
"""empty message Revision ID: 50_added_sender_id Revises: 40_sent_at_column Create Date: 2015-10-19 15:32:00.951750 """ # revision identifiers, used by Alembic. revision = '50_added_sender_id' down_revision = '40_sent_at_column' from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.add_column('notifications', sa.Column('sender_id', sa.String(length=255), nullable=True)) op.create_index(op.f('ix_notifications_sender_id'), 'notifications', ['sender_id'], unique=False) ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_index(op.f('ix_notifications_sender_id'), table_name='notifications') op.drop_column('notifications', 'sender_id') ### end Alembic commands ###
TangHao1987/intellij-community
refs/heads/master
python/helpers/pydev/pydev_runfiles_xml_rpc.py
42
import threading
import traceback
import warnings
from _pydev_filesystem_encoding import getfilesystemencoding
from pydev_imports import xmlrpclib, _queue
Queue = _queue.Queue
from pydevd_constants import *

#This may happen in IronPython (in Python it shouldn't happen as there are
#'fast' replacements that are used in xmlrpclib.py)
warnings.filterwarnings(
    'ignore', 'The xmllib module is obsolete.*', DeprecationWarning)

file_system_encoding = getfilesystemencoding()


#=======================================================================================================================
# _ServerHolder
#=======================================================================================================================
class _ServerHolder:
    '''
    Helper so that we don't have to use a global here.
    '''
    SERVER = None


#=======================================================================================================================
# SetServer
#=======================================================================================================================
def SetServer(server):
    # Installs the (possibly fake) server used by the notify* functions below.
    _ServerHolder.SERVER = server


#=======================================================================================================================
# ParallelNotification
#=======================================================================================================================
class ParallelNotification(object):
    # A queued XML-RPC call: method name plus positional args.

    def __init__(self, method, args):
        self.method = method
        self.args = args

    def ToTuple(self):
        # (method, args) shape expected by server.notifyCommands batches.
        return self.method, self.args


#=======================================================================================================================
# KillServer
#=======================================================================================================================
class KillServer(object):
    # Sentinel placed on the queue to make ServerComm.run() exit.
    pass


#=======================================================================================================================
# ServerFacade
#=======================================================================================================================
class ServerFacade(object):
    # Facade with the server's interface: every call is turned into a
    # ParallelNotification and enqueued; ServerComm sends them in batches.

    def __init__(self, notifications_queue):
        self.notifications_queue = notifications_queue

    def notifyTestsCollected(self, *args):
        self.notifications_queue.put_nowait(ParallelNotification('notifyTestsCollected', args))

    def notifyConnected(self, *args):
        self.notifications_queue.put_nowait(ParallelNotification('notifyConnected', args))

    def notifyTestRunFinished(self, *args):
        self.notifications_queue.put_nowait(ParallelNotification('notifyTestRunFinished', args))

    def notifyStartTest(self, *args):
        self.notifications_queue.put_nowait(ParallelNotification('notifyStartTest', args))

    def notifyTest(self, *args):
        self.notifications_queue.put_nowait(ParallelNotification('notifyTest', args))


#=======================================================================================================================
# ServerComm
#=======================================================================================================================
class ServerComm(threading.Thread):
    # Consumer thread: drains the notifications queue and forwards batches of
    # commands to the remote XML-RPC server until a KillServer is seen.

    def __init__(self, notifications_queue, port, daemon=False):
        threading.Thread.__init__(self)
        self.setDaemon(daemon) # If False, wait for all the notifications to be passed before exiting!
        self.finished = False
        self.notifications_queue = notifications_queue

        import pydev_localhost

        # It is necessary to specify an encoding, that matches
        # the encoding of all bytes-strings passed into an
        # XMLRPC call: "All 8-bit strings in the data structure are assumed to use the
        # packet encoding. Unicode strings are automatically converted,
        # where necessary."
        # Byte strings most likely come from file names.
        encoding = file_system_encoding
        if encoding == "mbcs":
            # Windos symbolic name for the system encoding CP_ACP.
            # We need to convert it into a encoding that is recognized by Java.
            # Unfortunately this is not always possible. You could use
            # GetCPInfoEx and get a name similar to "windows-1251". Then
            # you need a table to translate on a best effort basis. Much to complicated.
            # ISO-8859-1 is good enough.
            encoding = "ISO-8859-1"

        self.server = xmlrpclib.Server('http://%s:%s' % (pydev_localhost.get_localhost(), port),
                                       encoding=encoding)

    def run(self):
        while True:
            kill_found = False
            commands = []

            # Block for the first item, then drain whatever else is already
            # queued so multiple notifications go out in a single RPC.
            command = self.notifications_queue.get(block=True)
            if isinstance(command, KillServer):
                kill_found = True
            else:
                assert isinstance(command, ParallelNotification)
                commands.append(command.ToTuple())

            try:
                while True:
                    command = self.notifications_queue.get(block=False) #No block to create a batch.
                    if isinstance(command, KillServer):
                        kill_found = True
                    else:
                        assert isinstance(command, ParallelNotification)
                        commands.append(command.ToTuple())
            except:
                pass #That's OK, we're getting it until it becomes empty so that we notify multiple at once.

            if commands:
                try:
                    self.server.notifyCommands(commands)
                except:
                    traceback.print_exc()

            if kill_found:
                self.finished = True
                return


#=======================================================================================================================
# InitializeServer
#=======================================================================================================================
def InitializeServer(port, daemon=False):
    # Creates the facade + comm thread once; with port=None a Null stand-in is
    # installed so callers keep working without any connection.
    if _ServerHolder.SERVER is None:
        if port is not None:
            notifications_queue = Queue()
            _ServerHolder.SERVER = ServerFacade(notifications_queue)
            _ServerHolder.SERVER_COMM = ServerComm(notifications_queue, port, daemon)
            _ServerHolder.SERVER_COMM.start()
        else:
            #Create a null server, so that we keep the interface even without any connection.
            _ServerHolder.SERVER = Null()
            _ServerHolder.SERVER_COMM = Null()

    try:
        _ServerHolder.SERVER.notifyConnected()
    except:
        traceback.print_exc()


#=======================================================================================================================
# notifyTest
#=======================================================================================================================
def notifyTestsCollected(tests_count):
    # Notifies the total number of tests found (sent before the run starts).
    assert tests_count is not None
    try:
        _ServerHolder.SERVER.notifyTestsCollected(tests_count)
    except:
        traceback.print_exc()


#=======================================================================================================================
# notifyStartTest
#=======================================================================================================================
def notifyStartTest(file, test):
    '''
    @param file: the tests file (c:/temp/test.py)
    @param test: the test ran (i.e.: TestCase.test1)
    '''
    assert file is not None
    if test is None:
        test = '' #Could happen if we have an import error importing module.

    try:
        _ServerHolder.SERVER.notifyStartTest(file, test)
    except:
        traceback.print_exc()


def _encode_if_needed(obj):
    # Wraps/encodes strings so they survive the XML-RPC transport:
    # Python 2 sends Binary payloads, Python 3 sends encoded bytes.
    if not IS_PY3K:
        if isinstance(obj, str):
            try:
                return xmlrpclib.Binary(obj.encode('ISO-8859-1', 'xmlcharrefreplace'))
            except:
                # Already byte-encoded content: ship it as-is.
                return xmlrpclib.Binary(obj)

        elif isinstance(obj, unicode):
            return xmlrpclib.Binary(obj.encode('ISO-8859-1', 'xmlcharrefreplace'))

    else:
        if isinstance(obj, str):
            return obj.encode('ISO-8859-1', 'xmlcharrefreplace')

    return obj


#=======================================================================================================================
# notifyTest
#=======================================================================================================================
def notifyTest(cond, captured_output, error_contents, file, test, time):
    '''
    @param cond: ok, fail, error
    @param captured_output: output captured from stdout
    @param captured_output: output captured from stderr
    @param file: the tests file (c:/temp/test.py)
    @param test: the test ran (i.e.: TestCase.test1)
    @param time: float with the number of seconds elapsed
    '''
    assert cond is not None
    assert captured_output is not None
    assert error_contents is not None
    assert file is not None
    if test is None:
        test = '' #Could happen if we have an import error importing module.
    assert time is not None
    try:
        captured_output = _encode_if_needed(captured_output)
        error_contents = _encode_if_needed(error_contents)

        _ServerHolder.SERVER.notifyTest(cond, captured_output, error_contents, file, test, time)
    except:
        traceback.print_exc()


#=======================================================================================================================
# notifyTestRunFinished
#=======================================================================================================================
def notifyTestRunFinished(total_time):
    assert total_time is not None
    try:
        _ServerHolder.SERVER.notifyTestRunFinished(total_time)
    except:
        traceback.print_exc()


#=======================================================================================================================
# forceServerKill
#=======================================================================================================================
def forceServerKill():
    # Queues the KillServer sentinel; ServerComm.run() exits when it sees it.
    _ServerHolder.SERVER_COMM.notifications_queue.put_nowait(KillServer())
satanas/Turpial
refs/heads/development
turpial/ui/gtk/htmlview.py
3
# -*- coding: utf-8 -*-

# Widget for HTML view in Turpial
#
# Author: Wil Alvarez (aka Satanas)

import os

from gi.repository import Gtk
from gi.repository import GLib
from gi.repository import WebKit
from gi.repository import GObject


class HtmlView(Gtk.VBox):
    # GTK container embedding a WebKit view. Navigation to cmd:/link: URIs is
    # intercepted and re-emitted as the custom signals declared below.
    __gsignals__ = {
        "action-request": (GObject.SignalFlags.RUN_FIRST, GObject.TYPE_NONE, (GObject.TYPE_STRING, )),
        "link-request": (GObject.SignalFlags.RUN_FIRST, GObject.TYPE_NONE, (GObject.TYPE_STRING, )),
        "load-started": (GObject.SignalFlags.RUN_FIRST, GObject.TYPE_NONE, ()),
        "load-finished": (GObject.SignalFlags.RUN_FIRST, GObject.TYPE_NONE, ()),
    }

    def __init__(self, coding='utf-8'):
        """Builds the WebKit view inside a scrolled window.

        coding: character encoding passed to load_string() when rendering.
        """
        Gtk.VBox.__init__(self, False)
        self.coding = coding
        # Base URI for relative resources: the directory of this module.
        self.uri = 'file://' + os.path.dirname(__file__)

        self.settings = WebKit.WebSettings()
        self.settings.set_property('enable-default-context-menu', False)
        self.settings.set_property('enable-developer-extras', True)
        self.settings.set_property('enable-plugins', True)
        self.settings.set_property('enable-java_applet', False)
        self.settings.set_property('enable-page-cache', True)
        self.settings.set_property('enable-file-access-from-file-uris', True)
        self.settings.set_property('enable-offline-web-application_cache', False)
        self.settings.set_property('enable-html5-local-storage', False)
        self.settings.set_property('enable-html5-database', False)
        self.settings.set_property('enable-xss-auditor', False)
        # Property only exists on newer WebKit versions; ignore if missing.
        try:
            self.settings.set_property('enable-dns-prefetching', False)
        except TypeError:
            pass
        self.settings.set_property('enable-caret-browsing', False)
        self.settings.set_property('resizable-text-areas', False)
        self.settings.web_security_enabled = False
        # Same: accelerated compositing may be unsupported on this WebKit.
        try:
            self.settings.set_property('enable-accelerated-compositing', True)
        except TypeError:
            print "No support for accelerated compositing"

        self.view = WebKit.WebView()
        self.view.set_settings(self.settings)
        #Added new properties in this way cause 'from' is recognized as a key word
        self.view.get_settings().set_property('enable-universal-access-from-file-uris', True)
        self.view.connect('load-started', self.__started)
        self.view.connect('load-finished', self.__finished)
        self.view.connect('console-message', self.__console_message)
        self.view.connect('navigation-policy-decision-requested', self.__process)
        self.view.connect('new-window-policy-decision-requested', self.__on_new_window_requested);

        scroll = Gtk.ScrolledWindow()
        scroll.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.NEVER)
        scroll.set_shadow_type(Gtk.ShadowType.IN)
        scroll.add(self.view)

        self.pack_start(scroll, True, True, 0)

    def __on_new_window_requested(self, view, frame, request, decision, u_data):
        # Pop-ups are never opened; the URI is forwarded as a link-request.
        self.emit('link-request', request.get_uri())

    def __console_message(self, view, message, line, source_id, data=None):
        #print "%s <%s:%i>" % (message, source_id, line)
        print "%s" % message
        # True suppresses WebKit's own console logging.
        return True

    def __process(self, view, frame, request, action, policy, data=None):
        """Routes navigation: cmd:/link: URIs become signals, rest navigates."""
        url = request.get_uri()
        if url is None:
            pass
        elif url.startswith('cmd:'):
            policy.ignore()
            self.emit('action-request', url[4:])
        elif url.startswith('link:'):
            policy.ignore()
            self.emit('link-request', url[5:])
        policy.use()

    def __started(self, widget, frame):
        self.emit('load-started')

    def __finished(self, widget, frame):
        self.emit('load-finished')

    def load(self, url):
        # idle_add defers to the GTK main loop: safe from non-GUI threads.
        GLib.idle_add(self.view.load_uri, url)

    def render(self, html):
        # Renders an in-memory HTML string against self.uri as base.
        GLib.idle_add(self.view.load_string, html, "text/html", self.coding, self.uri)

    def execute(self, script):
        # Newlines are stripped because execute_script takes a single line.
        script = script.replace('\n', ' ')
        self.view.execute_script(script)

    def stop(self):
        self.view.stop_loading()

GObject.type_register(HtmlView)
Gabriel0402/zulip
refs/heads/master
zerver/management/commands/check_redis.py
116
from __future__ import absolute_import

from zerver.models import get_user_profile_by_id
from zerver.lib.rate_limiter import client, max_api_calls, max_api_window

from django.core.management.base import BaseCommand
from django.conf import settings
from optparse import make_option

import time, logging


class Command(BaseCommand):
    help = """Checks redis to make sure our rate limiting system hasn't grown a bug and left redis with a bunch of data

Usage: ./manage.py [--trim] check_redis"""

    option_list = BaseCommand.option_list + (
        make_option('-t', '--trim',
                    dest='trim',
                    default=False,
                    action='store_true',
                    help="Actually trim excess"),
        )

    def _check_within_range(self, key, count_func, trim_func):
        """Warns when a rate-limit key has too many entries or no TTL.

        key:        redis key "ratelimit:<user_id>:...:<list|zset>".
        count_func: zero-arg callable returning the key's element count.
        trim_func:  (key, max_calls) callable removing the excess; only
                    invoked when --trim was given.
        """
        user_id = int(key.split(':')[1])
        # Best-effort lookup: a stale key may reference a deleted user.
        try:
            user = get_user_profile_by_id(user_id)
        except:
            user = None
        max_calls = max_api_calls(user=user)

        age = int(client.ttl(key))
        if age < 0:
            # ttl < 0 means no expiry is set; such keys would live forever.
            logging.error("Found key with age of %s, will never expire: %s" % (age, key,))

        count = count_func()
        if count > max_calls:
            logging.error("Redis health check found key with more elements \
than max_api_calls! (trying to trim) %s %s" % (key, count))
            if self.trim:
                # Re-arm the expiry, then delegate the actual trimming.
                client.expire(key, max_api_window(user=user))
                trim_func(key, max_calls)

    def handle(self, *args, **options):
        if not settings.RATE_LIMITING:
            print "This machine is not using redis or rate limiting, aborting"
            exit(1)

        # Find all keys, and make sure they're all within size constraints
        wildcard_list = "ratelimit:*:*:list"
        wildcard_zset = "ratelimit:*:*:zset"

        self.trim = options['trim']

        lists = client.keys(wildcard_list)
        for list_name in lists:
            self._check_within_range(list_name,
                                     lambda: client.llen(list_name),
                                     lambda key, max_calls: client.ltrim(key, 0, max_calls - 1))

        zsets = client.keys(wildcard_zset)
        for zset in zsets:
            now = time.time()
            # We can warn on our zset being too large, but we don't know what
            # elements to trim. We'd have to go through every list item and take
            # the intersection. The best we can do is expire it
            self._check_within_range(zset,
                                     lambda: client.zcount(zset, 0, now),
                                     lambda key, max_calls: None)
hojel/calibre
refs/heads/master
src/calibre/gui2/convert/toc.py
14
#!/usr/bin/env python2 # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai from __future__ import with_statement __license__ = 'GPL v3' __copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' from calibre.gui2.convert.toc_ui import Ui_Form from calibre.gui2.convert import Widget from calibre.gui2 import error_dialog from calibre.utils.localization import localize_user_manual_link class TOCWidget(Widget, Ui_Form): TITLE = _('Table of\nContents') ICON = I('series.png') HELP = _('Control the creation/conversion of the Table of Contents.') COMMIT_NAME = 'toc' def __init__(self, parent, get_option, get_help, db=None, book_id=None): Widget.__init__(self, parent, ['level1_toc', 'level2_toc', 'level3_toc', 'toc_threshold', 'max_toc_links', 'no_chapters_in_toc', 'use_auto_toc', 'toc_filter', 'duplicate_links_in_toc', ] ) self.db, self.book_id = db, book_id self.initialize_options(get_option, get_help, db, book_id) self.opt_level1_toc.set_msg(_('Level &1 TOC (XPath expression):')) self.opt_level2_toc.set_msg(_('Level &2 TOC (XPath expression):')) self.opt_level3_toc.set_msg(_('Level &3 TOC (XPath expression):')) try: self.help_label.setText(self.help_label.text() % localize_user_manual_link( 'http://manual.calibre-ebook.com/conversion.html#table-of-contents')) except TypeError: pass # link already localized def pre_commit_check(self): for x in ('level1', 'level2', 'level3'): x = getattr(self, 'opt_'+x+'_toc') if not x.check(): error_dialog(self, _('Invalid XPath'), _('The XPath expression %s is invalid.')%x.text).exec_() return False return True
ethanbao/api-client-staging-1
refs/heads/master
generated/python/gapic-google-longrunning-v1/docs/conf.py
3
# -*- coding: utf-8 -*- # # gapic-google-longrunning documentation build configuration file # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import shlex # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('..')) __version__ = '0.11.1' # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', ] # autodoc/autosummary flags autoclass_content = 'both' autodoc_default_flags = ['members'] autosummary_generate = True # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'gapic-google-longrunning' copyright = u'2016, Google' author = u'Google APIs' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. 
# # The full version, including alpha/beta/rc tags. release = __version__ # The short X.Y version. version = '.'.join(release.split('.')[0:2]) # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. 
For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = [] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. 
#html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'gapic-google-longrunning-doc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'gapic-google-longrunning.tex', u'gapic-google-longrunning Documentation', author, 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. 
#latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'gapic-google-longrunning', u'gapic-google-longrunning Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'gapic-google-longrunning', u'gapic-google-longrunning Documentation', author, 'gapic-google-longrunning', 'GAPIC library for the google (api.version) service', 'APIs'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False # Example configuration for intersphinx: refer to the Python standard library. 
intersphinx_mapping = { 'python': ('http://python.readthedocs.org/en/latest/', None), 'gax': ('https://gax-python.readthedocs.org/en/latest/', None), } # Napoleon settings napoleon_google_docstring = True napoleon_numpy_docstring = True napoleon_include_private_with_doc = False napoleon_include_special_with_doc = True napoleon_use_admonition_for_examples = False napoleon_use_admonition_for_notes = False napoleon_use_admonition_for_references = False napoleon_use_ivar = False napoleon_use_param = True napoleon_use_rtype = True
KiranJKurian/XScheduler
refs/heads/master
venv/lib/python2.7/site-packages/pyasn1/compat/__init__.py
3653
# This file is necessary to make this directory a package.
osmocom/osmo-bts
refs/heads/master
contrib/dump_docs.py
1
#!/usr/bin/env python """ Start the process and dump the documentation to the doc dir """ import socket, subprocess, time,os env = os.environ env['L1FWD_BTS_HOST'] = '127.0.0.1' bts_proc = subprocess.Popen(["./src/osmo-bts-sysmo/sysmobts-remote", "-c", "./doc/examples/sysmo/osmo-bts-sysmo.cfg"], env = env, stdin=None, stdout=None) time.sleep(1) try: sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sck.setblocking(1) sck.connect(("localhost", 4241)) sck.recv(4096) # Now send the command sck.send("show online-help\r") xml = "" while True: data = sck.recv(4096) xml = "%s%s" % (xml, data) if data.endswith('\r\nOsmoBTS> '): break # Now write everything until the end to the file out = open('doc/vty_reference.xml', 'w') out.write(xml[18:-11]) out.close() finally: # Clean-up bts_proc.kill() bts_proc.wait()
mnowster/compose
refs/heads/master
compose/cli/docopt_command.py
17
from __future__ import absolute_import
from __future__ import unicode_literals

import sys
from inspect import getdoc

from docopt import docopt
from docopt import DocoptExit


def docopt_full_help(docstring, *args, **kwargs):
    """Parse ``docstring`` usage with docopt.

    On a usage error, exit with the *full* docstring instead of docopt's
    short usage summary (DocoptExit only carries the usage section).
    """
    try:
        return docopt(docstring, *args, **kwargs)
    except DocoptExit:
        raise SystemExit(docstring)


class DocoptCommand(object):
    """Base class for docopt-driven CLI entry points.

    Subclasses provide a class docstring in docopt format with COMMAND and
    ARGS placeholders, one method per subcommand (the method's docstring is
    that subcommand's usage), and a ``perform_command`` implementation.
    """

    def docopt_options(self):
        # options_first=True stops option parsing at the first positional,
        # so subcommand options are left in ARGS for the second parse pass.
        return {'options_first': True}

    def sys_dispatch(self):
        """Entry point: dispatch based on the process's argv."""
        self.dispatch(sys.argv[1:], None)

    def dispatch(self, argv, global_options):
        self.perform_command(*self.parse(argv, global_options))

    def parse(self, argv, global_options):
        """Two-pass parse: top-level options first, then the subcommand's.

        Returns (top-level options, handler method, subcommand options).
        Raises SystemExit with the usage text when no command was given,
        and NoSuchCommand for an unknown or undocumented command.
        """
        options = docopt_full_help(getdoc(self), argv, **self.docopt_options())
        command = options['COMMAND']

        if command is None:
            raise SystemExit(getdoc(self))

        handler = self.get_handler(command)
        docstring = getdoc(handler)

        # A handler without a docstring has no usage spec, so it cannot be
        # parsed -- treat it the same as a missing command.
        if docstring is None:
            raise NoSuchCommand(command, self)

        command_options = docopt_full_help(docstring, options['ARGS'],
                                           options_first=True)
        return options, handler, command_options

    def get_handler(self, command):
        """Map a CLI command name to a method (dashes become underscores)."""
        command = command.replace('-', '_')

        if not hasattr(self, command):
            raise NoSuchCommand(command, self)

        return getattr(self, command)


class NoSuchCommand(Exception):
    """Raised when the requested subcommand does not exist on the command."""

    def __init__(self, command, supercommand):
        super(NoSuchCommand, self).__init__("No such command: %s" % command)

        # Kept for callers that want to print contextual help.
        self.command = command
        self.supercommand = supercommand
li6xiang/ts_push
refs/heads/master
.vim/syntax_checkers/python/codec.py
97
#!/usr/bin/env python from __future__ import print_function from sys import argv, exit import codecs import re import os if len(argv) != 2: exit(1) try: with open(argv[1]) as fle: text = fle.readlines() if text: match = re.match(r"#\s*coding\s*:\s*(?P<coding>\w+)", text[0]) if match: text = codecs.lookup(match.groupdict()["coding"]).incrementaldecoder().decode( ''.join(text).encode('utf-8')).encode('utf-8') if isinstance(text, list): text = ''.join(text).encode('utf-8') compile(text, argv[1], 'exec', 0, 1) except SyntaxError as err: print('%s:%s:%s: %s' % (err.filename, err.lineno, err.offset, err.msg)) except Exception as err: print('%s:%s:%s: %s' % (os.path.abspath(argv[1]), 1, 0, err))
hellsgate1001/bookit
refs/heads/master
docs/env/Lib/site-packages/django/contrib/sessions/middleware.py
215
import time

from django.conf import settings
from django.utils.cache import patch_vary_headers
from django.utils.http import cookie_date
from django.utils.importlib import import_module


class SessionMiddleware(object):
    """Attach a session to each request and persist it on the response."""

    def process_request(self, request):
        # The session backend is configurable; import it lazily per request.
        engine = import_module(settings.SESSION_ENGINE)
        session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME, None)
        # A missing cookie yields session_key=None; the store then starts
        # an empty, unsaved session.
        request.session = engine.SessionStore(session_key)

    def process_response(self, request, response):
        """
        If request.session was modified, or if the configuration is to save the
        session every time, save the changes and set a session cookie.
        """
        try:
            accessed = request.session.accessed
            modified = request.session.modified
        except AttributeError:
            # Another middleware replaced/removed request.session; nothing
            # for us to persist.
            pass
        else:
            if accessed:
                # The response depends on the session cookie, so caches must
                # vary on it.
                patch_vary_headers(response, ('Cookie',))
            if modified or settings.SESSION_SAVE_EVERY_REQUEST:
                if request.session.get_expire_at_browser_close():
                    # Session cookie: no explicit expiry.
                    max_age = None
                    expires = None
                else:
                    max_age = request.session.get_expiry_age()
                    expires_time = time.time() + max_age
                    expires = cookie_date(expires_time)
                # Save the session data and refresh the client cookie.
                # Skip session save for 500 responses, refs #3881.
                if response.status_code != 500:
                    request.session.save()
                    response.set_cookie(settings.SESSION_COOKIE_NAME,
                            request.session.session_key, max_age=max_age,
                            expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,
                            path=settings.SESSION_COOKIE_PATH,
                            secure=settings.SESSION_COOKIE_SECURE or None,
                            httponly=settings.SESSION_COOKIE_HTTPONLY or None)
        return response
ntt-sic/cinder
refs/heads/master
cinder/tests/test_conf.py
3
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from oslo.config import cfg

from cinder import test

# Register one throwaway option so the flag-override tests have a known
# option to manipulate.
CONF = cfg.CONF
CONF.register_opt(cfg.StrOpt('conf_unittest',
                             default='foo',
                             help='for testing purposes only'))


class ConfigTestCase(test.TestCase):
    """Tests for oslo.config option declaration and override behaviour."""

    def setUp(self):
        super(ConfigTestCase, self).setUp()

    def test_declare(self):
        """import_opt registers an option but never clobbers an override."""
        self.assertNotIn('answer', CONF)
        CONF.import_opt('answer', 'cinder.tests.declare_conf')
        self.assertIn('answer', CONF)
        self.assertEqual(CONF.answer, 42)

        # Make sure we don't overwrite anything
        CONF.set_override('answer', 256)
        self.assertEqual(CONF.answer, 256)
        CONF.import_opt('answer', 'cinder.tests.declare_conf')
        self.assertEqual(CONF.answer, 256)

    def test_runtime_and_unknown_conf(self):
        """Importing a module at runtime registers its options as a side effect."""
        self.assertNotIn('runtime_answer', CONF)
        import cinder.tests.runtime_conf
        self.assertIn('runtime_answer', CONF)
        self.assertEqual(CONF.runtime_answer, 54)

    def test_long_vs_short_conf(self):
        """Options whose names share a prefix must not shadow each other."""
        CONF.clear()
        CONF.register_cli_opt(cfg.StrOpt('duplicate_answer_long',
                                         default='val',
                                         help='desc'))
        CONF.register_cli_opt(cfg.IntOpt('duplicate_answer',
                                         default=50,
                                         help='desc'))

        argv = ['--duplicate_answer=60']
        CONF(argv, default_config_files=[])
        self.assertEqual(CONF.duplicate_answer, 60)
        self.assertEqual(CONF.duplicate_answer_long, 'val')

    def test_conf_leak_left(self):
        """self.flags overrides must not leak into the next test (part 1)."""
        self.assertEqual(CONF.conf_unittest, 'foo')
        self.flags(conf_unittest='bar')
        self.assertEqual(CONF.conf_unittest, 'bar')

    def test_conf_leak_right(self):
        """self.flags overrides must not leak into the next test (part 2)."""
        self.assertEqual(CONF.conf_unittest, 'foo')
        self.flags(conf_unittest='bar')
        self.assertEqual(CONF.conf_unittest, 'bar')

    def test_conf_overrides(self):
        """CONF.reset() restores a flag-overridden option to its default."""
        self.assertEqual(CONF.conf_unittest, 'foo')
        self.flags(conf_unittest='bar')
        self.assertEqual(CONF.conf_unittest, 'bar')
        CONF.reset()
        self.assertEqual(CONF.conf_unittest, 'foo')
Jopie64/cppformat
refs/heads/master
support/update-converity-branch.py
2
#!/usr/bin/env python # Update the coverity branch from the master branch. # It is not done automatically because Coverity Scan limits # the number of submissions per day. from __future__ import print_function import shutil, tempfile from subprocess import check_output, STDOUT class Git: def __init__(self, dir): self.dir = dir def __call__(self, *args): output = check_output(['git'] + list(args), cwd=self.dir, stderr=STDOUT) print(output) return output dir = tempfile.mkdtemp() try: git = Git(dir) git('clone', '-b', 'coverity', 'git@github.com:cppformat/cppformat.git', dir) output = git('merge', '-X', 'theirs', '--no-commit', 'origin/master') if 'Fast-forward' not in output: git('reset', 'HEAD', '.travis.yml') git('checkout', '--', '.travis.yml') git('commit', '-m', 'Update coverity branch') git('push') finally: shutil.rmtree(dir)
aopp-pred/rpe
refs/heads/master
generator/python/rpgen/operators.py
1
"""Fortran operator definitions.""" # Copyright 2015 Andrew Dawson, Peter Dueben # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import (absolute_import, print_function) from collections import namedtuple from ._io import from_json as _from_json from .types import get_fortran_type #: A Fortran operator. FortranOperator = namedtuple('FortranOperator', ('name', 'operator', 'operator_categories', 'return_type')) def _is_valid_operator_type(type_name): if type_name.lower() not in ('unary', 'binary'): raise ValueError('Invalid operator type: "{}"'.format(type_name)) return type_name.lower() def _operator_from_json(json_object): (name, defn), = json_object.items() try: operator = defn['operator'] operator_categories = \ [t for t in map(lambda x: x.lower(), defn['operator_categories']) if _is_valid_operator_type(t)] return_type = get_fortran_type(defn['return_type']) except KeyError: raise ValueError('The JSON definition of the operator "{}" ' 'is malformed'.format(name)) return FortranOperator(name, operator, operator_categories, return_type) def from_json(json_file): return _from_json(json_file, 'operators', _operator_from_json)
graphite-project/carbon
refs/heads/master
lib/carbon/tests/test_util.py
2
import platform
import socket
import unittest

from carbon.util import parseDestinations
from carbon.util import enableTcpKeepAlive
from carbon.util import TaggedSeries


class UtilTest(unittest.TestCase):
    """Tests for socket helpers and tagged-series name handling."""

    @unittest.skipIf(platform.system() == 'Darwin',
                     "test_enable_tcp_keep_alive broken on MacOS")
    def test_enable_tcp_keep_alive(self):
        """enableTcpKeepAlive(..., True, ...) must set SO_KEEPALIVE."""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

        # Minimal stand-in for a Twisted transport: just enough surface
        # for enableTcpKeepAlive to operate on the real socket above.
        class _Transport():
            def getHandle(self):
                return s

            def setTcpKeepAlive(self, value):
                s.setsockopt(socket.SOL_TCP, socket.SO_KEEPALIVE, value)

        enableTcpKeepAlive(_Transport(), True, None)
        # NOTE: assertEquals is a deprecated alias removed in Python 3.12;
        # assertEqual is used throughout instead.
        self.assertEqual(s.getsockopt(socket.SOL_TCP, socket.SO_KEEPALIVE), 1)

    def test_sanitizing_name_as_tag_value(self):
        """Leading '~' characters are stripped; all-'~' names must raise."""
        test_cases = [
            {
                'original': "my~.test.abc",
                'expected': "my~.test.abc",
            }, {
                'original': "a.b.c",
                'expected': "a.b.c",
            }, {
                'original': "~~a~~.~~~b~~~.~~~c~~~",
                'expected': "a~~.~~~b~~~.~~~c~~~",
            }, {
                'original': "a.b.c~",
                'expected': "a.b.c~",
            }, {
                'original': "~a.b.c",
                'expected': "a.b.c",
            }, {
                'original': "~a~",
                'expected': "a~",
            }, {
                'original': "~~~",
                'raises': True,
            }, {
                'original': "~",
                'raises': True,
            },
        ]

        for test_case in test_cases:
            if test_case.get('raises', False):
                self.assertRaises(
                    Exception,
                    TaggedSeries.sanitize_name_as_tag_value,
                    test_case['original'],
                )
            else:
                result = TaggedSeries.sanitize_name_as_tag_value(test_case['original'])
                self.assertEqual(result, test_case['expected'])

    def test_validate_tag_key_and_value(self):
        """validateTagAndValue rejects empty/malformed tag keys and values."""
        # assert that it raises exception when sanitized name is still not valid
        with self.assertRaises(Exception):
            # sanitized name is going to be '', which is not a valid tag value
            TaggedSeries.sanitize_name_as_tag_value('~~~~')

        with self.assertRaises(Exception):
            # given tag value is invalid because it has length 0
            TaggedSeries.validateTagAndValue('metric.name;tag=')

        with self.assertRaises(Exception):
            # given tag key is invalid because it has length 0
            TaggedSeries.validateTagAndValue('metric.name;=value')

        with self.assertRaises(Exception):
            # given tag is missing =
            TaggedSeries.validateTagAndValue('metric.name;tagvalue')

        with self.assertRaises(Exception):
            # given tag value is invalid because it starts with ~
            TaggedSeries.validateTagAndValue('metric.name;tag=~value')

        with self.assertRaises(Exception):
            # given tag key is invalid because it contains !
            TaggedSeries.validateTagAndValue('metric.name;ta!g=value')


# Destinations have the form:
# <host> ::= <string without colons> | "[" <string> "]"
# <port> ::= <number>
# <instance> ::= <string>
# <destination> ::= <host> ":" <port> | <host> ":" <port> ":" <instance>
class ParseDestinationsTest(unittest.TestCase):
    """Tests for parseDestinations covering the grammar above."""

    def test_valid_dest_unbracketed(self):
        # Tests valid destinations in the unbracketed form of <host>.
        dests = [
            "127.0.0.1:1234:alpha",       # Full IPv4 address
            "127.1:1234:beta",            # 'Short' IPv4 address
            "localhost:987:epsilon",      # Relative domain name
            "foo.bar.baz.uk.:890:sigma",  # Absolute domain name
        ]

        expected = [
            ("127.0.0.1", 1234, "alpha"),
            ("127.1", 1234, "beta"),
            ("localhost", 987, "epsilon"),
            ("foo.bar.baz.uk.", 890, "sigma"),
        ]

        actual = parseDestinations(dests)
        self.assertEqual(len(expected), len(actual))

        for exp, act in zip(expected, actual):
            self.assertEqual(exp, act)

    def test_valid_dest_bracketed(self):
        # Tests valid destinations in the bracketed form of <host>.
        dests = [
            "[fe80:dead:beef:cafe:0007:0007:0007:0001]:123:gamma",  # Full IPv6 address
            "[fe80:1234::7]:456:theta",     # Compact IPv6 address
            "[::]:1:o",                     # Very compact IPv6 address
            "[ffff::127.0.0.1]:789:omicron",  # IPv6 mapped IPv4 address
        ]

        expected = [
            ("fe80:dead:beef:cafe:0007:0007:0007:0001", 123, "gamma"),
            ("fe80:1234::7", 456, "theta"),
            ("::", 1, "o"),
            ("ffff::127.0.0.1", 789, "omicron"),
        ]

        actual = parseDestinations(dests)
        self.assertEqual(len(expected), len(actual))

        for exp, act in zip(expected, actual):
            self.assertEqual(exp, act)

    def test_valid_dest_without_instance(self):
        # Tests destinations without instance specified.
        dests = [
            "1.2.3.4:5678",
            "[::1]:2",
            "stats.example.co.uk:8125",
            "[127.0.0.1]:78",        # Odd use of the bracket feature, but why not?
            "[why.not.this.com]:89",
        ]

        expected = [
            ("1.2.3.4", 5678, None),
            ("::1", 2, None),
            ("stats.example.co.uk", 8125, None),
            ("127.0.0.1", 78, None),
            ("why.not.this.com", 89, None),
        ]

        actual = parseDestinations(dests)
        self.assertEqual(len(expected), len(actual))

        for exp, act in zip(expected, actual):
            self.assertEqual(exp, act)

    def test_wrong_dest(self):
        # Some cases of invalid input, e.g. invalid/missing port.
        dests = [
            "1.2.3.4",                      # No port
            "1.2.3.4:huh",                  # Invalid port (must be int)
            "[fe80::3285:a9ff:fe91:e287]",  # No port
            "[ffff::1.2.3.4]:notaport",     # Invalid port
        ]

        for dest in dests:
            try:
                parseDestinations([dest])
            except ValueError:
                continue
            raise AssertionError("Invalid input was accepted.")
MattDevo/edk2
refs/heads/master
BaseTools/Source/Python/UPT/Object/Parser/__init__.py
2
## @file # Python 'Object' package initialization file. # # This file is required to make Python interpreter treat the directory # as containing package. # # Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR> # # This program and the accompanying materials are licensed and made available # under the terms and conditions of the BSD License which accompanies this # distribution. The full text of the license may be found at # http://opensource.org/licenses/bsd-license.php # # THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, # WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. # ''' PARSER '''
xfournet/intellij-community
refs/heads/master
python/lib/Lib/site-packages/django/contrib/gis/geos/coordseq.py
411
"""
 This module houses the GEOSCoordSeq object, which is used internally by
 GEOSGeometry to house the actual coordinates of the Point,
 LineString, and LinearRing geometries.
"""
from ctypes import c_double, c_uint, byref

from django.contrib.gis.geos.base import GEOSBase, numpy
from django.contrib.gis.geos.error import GEOSException, GEOSIndexError
from django.contrib.gis.geos.libgeos import CS_PTR
from django.contrib.gis.geos import prototypes as capi

class GEOSCoordSeq(GEOSBase):
    "The internal representation of a list of coordinates inside a Geometry."

    # Required pointer type for GEOSBase pointer validation.
    ptr_type = CS_PTR

    #### Python 'magic' routines ####
    def __init__(self, ptr, z=False):
        "Initializes from a GEOS pointer."
        if not isinstance(ptr, CS_PTR):
            raise TypeError('Coordinate sequence should initialize with a CS_PTR.')
        self._ptr = ptr
        # Whether the parent geometry is 3D; governs Z handling below.
        self._z = z

    def __iter__(self):
        "Iterates over each point in the coordinate sequence."
        for i in xrange(self.size):
            yield self[i]

    def __len__(self):
        "Returns the number of points in the coordinate sequence."
        return int(self.size)

    def __str__(self):
        "Returns the string representation of the coordinate sequence."
        return str(self.tuple)

    def __getitem__(self, index):
        "Returns the coordinate sequence value at the given index."
        # Z is only included when the sequence is 3D *and* the parent
        # geometry declared itself 3D.
        coords = [self.getX(index), self.getY(index)]
        if self.dims == 3 and self._z:
            coords.append(self.getZ(index))
        return tuple(coords)

    def __setitem__(self, index, value):
        "Sets the coordinate sequence value at the given index."
        # Checking the input value
        if isinstance(value, (list, tuple)):
            pass
        elif numpy and isinstance(value, numpy.ndarray):
            pass
        else:
            raise TypeError('Must set coordinate with a sequence (list, tuple, or numpy array).')
        # Checking the dims of the input
        if self.dims == 3 and self._z:
            n_args = 3
            set_3d = True
        else:
            n_args = 2
            set_3d = False
        if len(value) != n_args:
            raise TypeError('Dimension of value does not match.')
        # Setting the X, Y, Z
        self.setX(index, value[0])
        self.setY(index, value[1])
        if set_3d:
            self.setZ(index, value[2])

    #### Internal Routines ####
    def _checkindex(self, index):
        "Checks the given index."
        sz = self.size
        if (sz < 1) or (index < 0) or (index >= sz):
            raise GEOSIndexError('invalid GEOS Geometry index: %s' % str(index))

    def _checkdim(self, dim):
        "Checks the given dimension."
        # Valid ordinate dimensions are 0 (X), 1 (Y), 2 (Z).
        if dim < 0 or dim > 2:
            raise GEOSException('invalid ordinate dimension "%d"' % dim)

    #### Ordinate getting and setting routines ####
    def getOrdinate(self, dimension, index):
        "Returns the value for the given dimension and index."
        self._checkindex(index)
        self._checkdim(dimension)
        # byref(c_double()) is the C out-parameter; the prototype's
        # errcheck extracts the double for us.
        return capi.cs_getordinate(self.ptr, index, dimension, byref(c_double()))

    def setOrdinate(self, dimension, index, value):
        "Sets the value for the given dimension and index."
        self._checkindex(index)
        self._checkdim(dimension)
        capi.cs_setordinate(self.ptr, index, dimension, value)

    def getX(self, index):
        "Get the X value at the index."
        return self.getOrdinate(0, index)

    def setX(self, index, value):
        "Set X with the value at the given index."
        self.setOrdinate(0, index, value)

    def getY(self, index):
        "Get the Y value at the given index."
        return self.getOrdinate(1, index)

    def setY(self, index, value):
        "Set Y with the value at the given index."
        self.setOrdinate(1, index, value)

    def getZ(self, index):
        "Get Z with the value at the given index."
        return self.getOrdinate(2, index)

    def setZ(self, index, value):
        "Set Z with the value at the given index."
        self.setOrdinate(2, index, value)

    ### Dimensions ###
    @property
    def size(self):
        "Returns the size of this coordinate sequence."
        return capi.cs_getsize(self.ptr, byref(c_uint()))

    @property
    def dims(self):
        "Returns the dimensions of this coordinate sequence."
        return capi.cs_getdims(self.ptr, byref(c_uint()))

    @property
    def hasz(self):
        """
        Returns whether this coordinate sequence is 3D.  This property value is
        inherited from the parent Geometry.
        """
        return self._z

    ### Other Methods ###
    def clone(self):
        "Clones this coordinate sequence."
        return GEOSCoordSeq(capi.cs_clone(self.ptr), self.hasz)

    @property
    def kml(self):
        "Returns the KML representation for the coordinates."
        # Getting the substitution string depending on whether the coordinates have
        # a Z dimension.
        if self.hasz:
            substr = '%s,%s,%s '
        else:
            substr = '%s,%s,0 '
        return '<coordinates>%s</coordinates>' % \
            ''.join([substr % self[i] for i in xrange(len(self))]).strip()

    @property
    def tuple(self):
        "Returns a tuple version of this coordinate sequence."
        n = self.size
        # A single point is returned bare, not wrapped in an outer tuple.
        if n == 1:
            return self[0]
        else:
            return tuple([self[i] for i in xrange(n)])
jjkoletar/panda3d
refs/heads/master
direct/src/http/webAIInspector.py
11
"""This is a web based inspector for the AI System. It can be accessed via http://hostname.domain:port/inspect The hostname.domain would of course be the computer that the AI is running on. The port will need to be defined when the instance is inited. """ import string, time, direct, inspect, socket from operator import itemgetter from direct.http import WebRequest from socket import gethostname from direct.task.Task import Task from sys import platform from pirates.uberdog.AIMagicWordTrade import AIMagicWordTrade from pirates.quest.QuestDB import QuestDict # Need to figure out which systeminfo module to import if platform == 'win32': from windowsSystemInfo import SystemInformation else: from linuxSystemInfo import SystemInformation class aiWebServer(SystemInformation): def __init__(self, air, listenPort=8080): SystemInformation.__init__(self) self.listenPort = listenPort self.air = simbase.air # self.taskMgr = Task.TaskManager() if __debug__: print "Listen port set to: %d" % self.listenPort # Start dispatcher self.web = WebRequest.WebRequestDispatcher() self.web.listenOnPort(self.listenPort) self.localHostName = gethostname() self.web.registerGETHandler('inspect', self.inspect) self.web.registerGETHandler('systemInfo', self.systemInfo) self.web.registerGETHandler('oMenu', self.oMenu) self.web.registerGETHandler('oType', self.oType) self.web.registerGETHandler('oInst', self.oInst) self.web.registerGETHandler('blank', self.blank) self.web.registerGETHandler('magicWord', self.magicWord) self.startCheckingIncomingHTTP() self.air.setConnectionURL("http://%s:%s/" % (socket.gethostbyname(socket.gethostname()),self.HTTPListenPort)) def magicWord(self, replyTo, **kw): # This will process Magic Word requests # Currently the following words are supported: # ~aiobjectcount # ~aitaskmgr # ~aijobmgr # ~assignQuest # ~money # First we need to figure out which magic word is being called try: theMagicWord = kw['magicWord'] except KeyError: # MagicWord issue. 
Malformed URL replyTo.respond('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n<html>\n<head>\n<meta content="text/html; charset=ISO-8859-1" http-equiv="content-type">\n<TITLE>Magic Word Error</title>\n</head><body>Please check the URL. Transaction could not be completed. Malformed URL.</BODY>\n</HTML>') return # Next we execute the magic word request if theMagicWord == 'aiobjectcount': replyTo.respond('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n<html>\n<head>\n<meta content="text/html; charset=ISO-8859-1" http-equiv="content-type">\n<TITLE>%s</title>\n</head><body><PRE>%s</PRE></body>\n</HTML>' % (theMagicWord, simbase.air.webPrintObjectCount())) return elif theMagicWord == 'aitaskmgr': replyTo.respond('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n<html>\n<head>\n<meta content="text/html; charset=ISO-8859-1" http-equiv="content-type">\n<TITLE>%s</title>\n</head><body><PRE>%s</PRE></body>\n</HTML>' % (theMagicWord, taskMgr)) return elif theMagicWord == 'aijobmgr': replyTo.respond('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n<html>\n<head>\n<meta content="text/html; charset=ISO-8859-1" http-equiv="content-type">\n<TITLE>%s</title>\n</head><body><PRE>%s</PRE></body>\n</HTML>' % (theMagicWord, jobMgr)) elif theMagicWord == 'money': # First, generate the Avatar HTML Select widget. 
selectWidget = self.genAvSelect() # Now that we've built the avatar list, we can repond with the HTML replyTo.respond('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n<html>\n<head>\n<meta content="text/html; charset=ISO-8859-1" http-equiv="content-type">\n<TITLE>Money</title>\n</head><body><form method="get" action="magicWord" name="magicWord">AvatarID: %s\nAmmount: <input maxlength="3" size="3" name="amount" value="100"><br><INPUT TYPE=HIDDEN NAME="magicWord" value="MONEY_ADD"><button value="Submit" name="Submit"></button><br></form></body>\n</HTML>' % selectWidget) elif theMagicWord == 'MONEY_ADD': av = kw['avatarId'] count = kw['amount'] try: av = int(av) count = int(count) except ValueError: # One or both of the two args could not be converted into a int # This being the case, the transaction mut be stopped. # The most likely cause is the input of a non num type into # the amount field print 'Incorrect value entered.' replyTo.respond('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n<html>\n<head>\n<meta content="text/html; charset=ISO-8859-1" http-equiv="content-type">\n<TITLE>Money Error</title>\n</head><body>Please check the Amount field. Transaction could not be completed.</BODY>\n</HTML>') return try: av = simbase.air.doId2do[av] except KeyError: replyTo.respond('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n<html>\n<head>\n<meta content="text/html; charset=ISO-8859-1" http-equiv="content-type">\n<TITLE>Money Error</title>\n</head><body>Please check the AvatarID field; the Avatar might have logged out. 
Transaction could not be completed.</BODY>\n</HTML>') return curGold = av.getInventory().getGoldInPocket() # print "Debug: Args being passed to AIMAgicWordTrade:\t%s" % av trade = AIMagicWordTrade(av, av.getDoId(), avatarId = av.getDoId()) if count > curGold: trade.giveGoldInPocket(count - curGold) else: trade.takeGoldInPocket(curGold - count) trade.sendTrade() # I don't think I need to issue a tradeRejected or # tradeSucceesed call here. replyTo.respond('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n<html>\n<head>\n<meta content="text/html; charset=ISO-8859-1" http-equiv="content-type">\n<TITLE>Money Modified</title>\n</head><body>Transaction complete.</BODY>\n</HTML>') return elif theMagicWord == 'assignQuest': avSelectWidget = self.genAvSelect() questSelectWidget = self.genQuestSelect() # Present HTML menu with options replyTo.respond('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n<html>\n<head>\n<meta content="text/html; charset=ISO-8859-1" http-equiv="content-type">\n<TITLE>AssignQuest</title>\n</head><body><form method="get" action="magicWord" name="magicWord">AvatarID: %s\nQuest to Assign: %s<br><INPUT TYPE=HIDDEN NAME="magicWord" value="QUEST_ADD"><button value="Submit" name="Submit"></button><br></form></body>\n</HTML>' % (avSelectWidget, questSelectWidget)) elif theMagicWord == 'QUEST_ADD': av = kw['avatarId'] av = int(av) questId = kw['questId'] # print 'Avatarid = %s\nQuestID = %s' % (av, questId) try: av = simbase.air.doId2do[av] except KeyError: replyTo.respond('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n<html>\n<head>\n<meta content="text/html; charset=ISO-8859-1" http-equiv="content-type">\n<TITLE>Money Error</title>\n</head><body>Please check the AvatarID field; the Avatar might have logged out. 
Transaction could not be completed.</BODY>\n</HTML>') return av.assignQuest(questId) replyTo.respond('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n<html>\n<head>\n<meta content="text/html; charset=ISO-8859-1" http-equiv="content-type">\n<TITLE>Quest Assigned</title>\n</head><body>The avatar with id: %s<BR>Has been assigned Quest: %s</body>\n</HTML>' % (kw['avatarId'], questId)) return else: # No word Matches replyTo.respond('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n<html>\n<head>\n<meta content="text/html; charset=ISO-8859-1" http-equiv="content-type">\n<TITLE>No Word Matches</title>\n</head><body>The Magic word provided does not exist or is not accessable via the web interface at this time.</body>\n</HTML>') return def timeStamp(self): # Returns the local time in the following string format: # Month-Day-Year Hour:Minute:Seconds # Example: 09-17-2007 15:36:04 return time.strftime("%m-%d-%Y %H:%M:%S", time.localtime()) def oMenu(self, replyTo, **kw): # Menu listing Magic words and Raw object list (all HTML links) replyTo.respond('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n<html>\n<head>\n<meta content="text/html; charset=ISO-8859-1" http-equiv="content-type">\n<TITLE>Menu Options</title>\n</head><body>Magic Words:<BR><UL><LI><A HREF="magicWord?magicWord=money" TARGET="oInst">Money</a><LI><A HREF="magicWord?magicWord=assignQuest" TARGET="oInst">AssignQuest</A>\n<LI><A HREF="magicWord?magicWord=aijobmgr" TARGET="oInst">AIjobMgr</A>\n<LI><A HREF="magicWord?magicWord=aitaskmgr" TARGET="oInst">AITaskMgr</a><LI><A HREF="magicWord?magicWord=aiobjectcount" TARGET="oInst">AIObjectCount</A>\n</UL><P><A HREF="oType" TARGET="oType">Raw Object List</a></body>\n</HTML>') return def genAvSelect(self): # We will need to populate HTML FORM menus to make this work. 
# We will need to provide a list of Avatars on the AI # along with a field to allow an int value to be sent # First, we need to get a dict of DistributedPlayerPirateAI's playerPirates = [] objList = self.generateSortedIDList() objList.reverse() while objList: tempObjElement = objList.pop() if str(tempObjElement[0]).find('DistributedPlayerPirateAI') != -1: playerPirates.append(tempObjElement[1]) # OK, now playerPirates should be a list of avatar ids # We should build a HTML select widget with the new list selectWidget = '<select name="avatarId">\n' while playerPirates: selectWidget = '%s<option>%s</option>\n' % (selectWidget, str(playerPirates.pop())) selectWidget = '%s</select><br>\n' % selectWidget return selectWidget def genQuestSelect(self): # Will generate an HTML select widget, with the Key vals from the QuestDB selectWidget = '<select name="questId">\n' for k, v in QuestDict.iteritems(): selectWidget = '%s<option>%s</option>\n' % (selectWidget, k) selectWidget = '%s</select><br>\n' % selectWidget return selectWidget def blank(self, replyTo, **kw): # This simple generates a blank page for the middle and right # frames;( for when the page is first accessed) replyTo.respond('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n<html>\n<head>\n<meta content="text/html; charset=ISO-8859-1" http-equiv="content-type">\n<TITLE>Word not found</title>\n</head><body></body>\n</HTML>') def oInst(self, replyTo, **kw): # This will populate the middle frame with list of the members of # the object selected in the left frame #print "%s|oInst Frame Accessed, Request ID %s" % (self.timeStamp(), str(kw)) head = '<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n<html>\n<head>\n<meta content="text/html; charset=ISO-8859-1" http-equiv="content-type">\n<title>member List</title>\n</head>\n<body>\n<UL>' foot = '</ul></body></HTML>' body = '' doIdRequested = '' for j, k in kw.iteritems(): doIdRequested 
= int(k) #print j,k try: memberList = inspect.getmembers(simbase.air.doId2do[doIdRequested]) except KeyError: replyTo.respond('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n<html>\n<head>\n<meta content="text/html; charset=ISO-8859-1" http-equiv="content-type">\n<TITLE>OBJ Gone</title>\n</head><body>The object is no longer on the system</body>\n</HTML>') return memberList.sort() memberList.reverse() while memberList: tempMember = memberList.pop() if (type(tempMember[1]) == str or type(tempMember[1]) == int or type(tempMember[1]) == float or type(tempMember[1]) == dict): body = '%s<LI>%s\n' % (body, str(tempMember)) replyTo.respond('%s%s%s' % (head,body,foot)) def oType(self, replyTo, **kw): # This will populate the left frame with a alpha sorted list of # objects. # print "%s|oType Frame Accessed" % self.timeStamp() head = '<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n<html>\n<head>\n<meta content="text/html; charset=ISO-8859-1" http-equiv="content-type">\n<title>Object List</title>\n</head>\n<body>\n<UL>' foot = '</ul></body></HTML>' objList = self.generateSortedIDList() # Need to sort objList by second col (the doid) objList = sorted(objList, key=itemgetter(1)) objList.reverse() body = '' # Pop off the Null entry while objList: tempObjElement = objList.pop() # tempObjElement[0].replace('<','') # tempObjElement[0].replace('>','') # if str(tempObjElement[0]).find('render') == -1: body = '%s<LI><A HREF="oInst?id=%s" target="oInst">%s:%s</A>\n' % (body, tempObjElement[1], tempObjElement[1], str(tempObjElement[0]).replace('<','').replace('>','')) replyTo.respond('%s%s%s' % (head,body,foot)) def inspect(self, replyTo, **kw): # This is the index. 
Basically, it will generate the frames for the # other functions to populate: systemInfo, oType, oInst, oAttrib # Three frames on the bottom row # frameset = '<frameset rows="35\%,65\%">\n<frame src="systemInfo" name="systemInfo" frameborder=1>\n<frameset cols="25\%,25\%,50\%">\n<frame src="oType" name="oType" frameborder=1>\n<frame src="blank" name="oInst" frameborder=1>\n<frame src="blank" name="oAttrib" frameborder=1>\n</frameset>\n</frameset>\n</html>' # Two Frames on the bottom row frameset = '<frameset rows="35\%,65\%">\n<frame src="systemInfo" name="systemInfo" frameborder=1>\n<frameset cols="50\%,50\%">\n<frame src="oMenu" name="oType" frameborder=1>\n<frame src="blank" name="oInst" frameborder=1>\n</frameset>\n</frameset>\n</html>' #print "%s|Index Frame Accessed" % self.timeStamp() # print str(simbase.air.doid2do) replyTo.respond('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN">\n<html lang="en">\n<head>\n<title>AI HTTP Interface: %s</title>\n</head>\n%s' % (self.localHostName, frameset)) def systemInfo(self, replyTo, **kw): # This is the contents of the top frame; i.e. 
system information self.refresh() #print "%s|SystemInfo Frame Accessed" % self.timeStamp() replyTo.respond('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n<html>\n<head>\n<meta content="text/html; charset=ISO-8859-1" http-equiv="content-type">\n<title>System Info</title>\n</head>\n<body>\n<center><table style="text-align: left; width: 443px; height: 128px;" border="1" cellpadding="2" cellspacing="2">\n<tbody>\n<tr>\n<td style="text-align: center;" colspan="4">Hostname: %s<br>\nOperating System: %s<br>\nCPU: %s</td>\n</tr>\n<tr>\n<td>Total RAM:</td>\n<td>%d</td>\n<td>Total VM</td>\n<td>%d</td>\n</tr>\n<tr>\n<td>Available RAM:</td>\n<td>%d</td>\n<td>Available VM</td>\n<td>%d</td>\n</tr>\n</tbody>\n</table></center>\n</body>\n</html>' % (self.localHostName, self.os, self.cpu, self.totalRAM, self.totalVM, self.availableRAM, self.availableVM)) def startCheckingIncomingHTTP(self): taskMgr.remove('pollHTTPTask') taskMgr.doMethodLater(0.3,self.pollHTTPTask,'pollHTTPTask') def stopCheckingIncomingHTTP(self): taskMgr.remove('pollHTTPTask') def pollHTTPTask(self,task): """ Task that polls the HTTP server for new requests. """ # print 'Polling...' self.web.poll() #taskMgr.doMethodLater(0.3,self.pollHTTPTask,'pollHTTPTask') return Task.again def generateSortedIDList(self): # looks at the simbase.air.doID2do dict, and returns a list # sorted by alpha order. 
IDlist = [] for key, val in simbase.air.doId2do.iteritems(): IDlist.append([val,key]) IDlist.sort() return IDlist def inspectObject(anObject): inspector = inspectorFor(anObject) # inspectorWindow = InspectorWindow(inspector) # inspectorWindow.open() # return inspectorWindow return inspector ### private def inspectorFor(anObject): typeName = string.capitalize(type(anObject).__name__) + 'Type' if typeName in _InspectorMap: inspectorName = _InspectorMap[typeName] else: print "Can't find an inspector for " + typeName inspectorName = 'Inspector' inspector = globals()[inspectorName](anObject) return inspector def initializeInspectorMap(): global _InspectorMap notFinishedTypes = ['BufferType', 'EllipsisType', 'FrameType', 'TracebackType', 'XRangeType'] _InspectorMap = { 'Builtin_function_or_methodType': 'FunctionInspector', 'BuiltinFunctionType': 'FunctionInspector', 'BuiltinMethodType': 'FunctionInspector', 'ClassType': 'ClassInspector', 'CodeType': 'CodeInspector', 'ComplexType': 'Inspector', 'DictionaryType': 'DictionaryInspector', 'DictType': 'DictionaryInspector', 'FileType': 'Inspector', 'FloatType': 'Inspector', 'FunctionType': 'FunctionInspector', 'Instance methodType': 'InstanceMethodInspector', 'InstanceType': 'InstanceInspector', 'IntType': 'Inspector', 'LambdaType': 'Inspector', 'ListType': 'SequenceInspector', 'LongType': 'Inspector', 'MethodType': 'FunctionInspector', 'ModuleType': 'ModuleInspector', 'NoneType': 'Inspector', 'SliceType': 'SliceInspector', 'StringType': 'SequenceInspector', 'TupleType': 'SequenceInspector', 'TypeType': 'Inspector', 'UnboundMethodType': 'FunctionInspector', 'DistributedshipcannonaiType': 'ClassInspector'} for each in notFinishedTypes: _InspectorMap[each] = 'Inspector' class Inspector: def __init__(self, anObject): self.object = anObject self.lastPartNumber = 0 self.initializePartsList() self.initializePartNames() def __str__(self): return __name__ + '(' + str(self.object) + ')' def initializePartsList(self): self._partsList = 
[] keys = self.namedParts() keys.sort() for each in keys: self._partsList.append(each) #if not callable(getattr(self.object, each)): # self._partsList.append(each) def initializePartNames(self): self._partNames = ['up'] + [str(each) for each in self._partsList] def title(self): "Subclasses may override." return string.capitalize(self.objectType().__name__) def getLastPartNumber(self): return self.lastPartNumber def selectedPart(self): return self.partNumber(self.getLastPartNumber()) def namedParts(self): return dir(self.object) def stringForPartNumber(self, partNumber): object = self.partNumber(partNumber) doc = None if callable(object): try: doc = object.__doc__ except: pass if doc: return (str(object) + '\n' + str(doc)) else: return str(object) def partNumber(self, partNumber): self.lastPartNumber = partNumber if partNumber == 0: return self.object else: part = self.privatePartNumber(partNumber) return getattr(self.object, part) def inspectorFor(self, part): return inspectorFor(part) def privatePartNumber(self, partNumber): return self._partsList[partNumber - 1] def partNames(self): return self._partNames def objectType(self): return type(self.object) ### class ModuleInspector(Inspector): def namedParts(self): return ['__dict__'] class ClassInspector(Inspector): def namedParts(self): return ['__bases__'] + self.object.__dict__.keys() def title(self): return self.object.__name__ + ' Class' class InstanceInspector(Inspector): def title(self): return self.object.__class__.__name__ def namedParts(self): return ['__class__'] + dir(self.object) ### class FunctionInspector(Inspector): def title(self): return self.object.__name__ + "()" class InstanceMethodInspector(Inspector): def title(self): return str(self.object.im_class) + "." 
+ self.object.__name__ + "()" class CodeInspector(Inspector): def title(self): return str(self.object) ### class ComplexInspector(Inspector): def namedParts(self): return ['real', 'imag'] ### class DictionaryInspector(Inspector): def initializePartsList(self): Inspector.initializePartsList(self) keys = self.object.keys() keys.sort() for each in keys: self._partsList.append(each) def partNumber(self, partNumber): self.lastPartNumber = partNumber if partNumber == 0: return self.object key = self.privatePartNumber(partNumber) if key in self.object: return self.object[key] else: return getattr(self.object, key) class SequenceInspector(Inspector): def initializePartsList(self): Inspector.initializePartsList(self) for each in range(len(self.object)): self._partsList.append(each) def partNumber(self, partNumber): self.lastPartNumber = partNumber if partNumber == 0: return self.object index = self.privatePartNumber(partNumber) if type(index) == IntType: return self.object[index] else: return getattr(self.object, index) class SliceInspector(Inspector): def namedParts(self): return ['start', 'stop', 'step'] ### Initialization initializeInspectorMap()
tucbill/manila
refs/heads/master
manila/tests/image/__init__.py
2
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work from manila.tests import *
deeponion/deeponion
refs/heads/master
test/functional/wallet_listsinceblock.py
16
#!/usr/bin/env python3 # Copyright (c) 2017-2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the listsincelast RPC.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_array_result, assert_raises_rpc_error class ListSinceBlockTest (BitcoinTestFramework): def set_test_params(self): self.num_nodes = 4 self.setup_clean_chain = True def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): self.nodes[2].generate(101) self.sync_all() self.test_no_blockhash() self.test_invalid_blockhash() self.test_reorg() self.test_double_spend() self.test_double_send() def test_no_blockhash(self): txid = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1) blockhash, = self.nodes[2].generate(1) self.sync_all() txs = self.nodes[0].listtransactions() assert_array_result(txs, {"txid": txid}, { "category": "receive", "amount": 1, "blockhash": blockhash, "confirmations": 1, }) assert_equal( self.nodes[0].listsinceblock(), {"lastblock": blockhash, "removed": [], "transactions": txs}) assert_equal( self.nodes[0].listsinceblock(""), {"lastblock": blockhash, "removed": [], "transactions": txs}) def test_invalid_blockhash(self): assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock, "42759cde25462784395a337460bde75f58e73d3f08bd31fdc3507cbac856a2c4") assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock, "0000000000000000000000000000000000000000000000000000000000000000") assert_raises_rpc_error(-8, "blockhash must be of length 64 (not 11, for 'invalid-hex')", self.nodes[0].listsinceblock, "invalid-hex") assert_raises_rpc_error(-8, "blockhash must be hexadecimal string (not 'Z000000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].listsinceblock, 
"Z000000000000000000000000000000000000000000000000000000000000000") def test_reorg(self): ''' `listsinceblock` did not behave correctly when handed a block that was no longer in the main chain: ab0 / \ aa1 [tx0] bb1 | | aa2 bb2 | | aa3 bb3 | bb4 Consider a client that has only seen block `aa3` above. It asks the node to `listsinceblock aa3`. But at some point prior the main chain switched to the bb chain. Previously: listsinceblock would find height=4 for block aa3 and compare this to height=5 for the tip of the chain (bb4). It would then return results restricted to bb3-bb4. Now: listsinceblock finds the fork at ab0 and returns results in the range bb1-bb4. This test only checks that [tx0] is present. ''' # Split network into two self.split_network() # send to nodes[0] from nodes[2] senttx = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1) # generate on both sides lastblockhash = self.nodes[1].generate(6)[5] self.nodes[2].generate(7) self.log.info('lastblockhash=%s' % (lastblockhash)) self.sync_all([self.nodes[:2], self.nodes[2:]]) self.join_network() # listsinceblock(lastblockhash) should now include tx, as seen from nodes[0] lsbres = self.nodes[0].listsinceblock(lastblockhash) found = False for tx in lsbres['transactions']: if tx['txid'] == senttx: found = True break assert found def test_double_spend(self): ''' This tests the case where the same UTXO is spent twice on two separate blocks as part of a reorg. ab0 / \ aa1 [tx1] bb1 [tx2] | | aa2 bb2 | | aa3 bb3 | bb4 Problematic case: 1. User 1 receives BTC in tx1 from utxo1 in block aa1. 2. User 2 receives BTC in tx2 from utxo1 (same) in block bb1 3. User 1 sees 2 confirmations at block aa3. 4. Reorg into bb chain. 5. User 1 asks `listsinceblock aa3` and does not see that tx1 is now invalidated. 
Currently the solution to this is to detect that a reorg'd block is asked for in listsinceblock, and to iterate back over existing blocks up until the fork point, and to include all transactions that relate to the node wallet. ''' self.sync_all() # Split network into two self.split_network() # share utxo between nodes[1] and nodes[2] utxos = self.nodes[2].listunspent() utxo = utxos[0] privkey = self.nodes[2].dumpprivkey(utxo['address']) self.nodes[1].importprivkey(privkey) # send from nodes[1] using utxo to nodes[0] change = '%.8f' % (float(utxo['amount']) - 1.0003) recipient_dict = { self.nodes[0].getnewaddress(): 1, self.nodes[1].getnewaddress(): change, } utxo_dicts = [{ 'txid': utxo['txid'], 'vout': utxo['vout'], }] txid1 = self.nodes[1].sendrawtransaction( self.nodes[1].signrawtransactionwithwallet( self.nodes[1].createrawtransaction(utxo_dicts, recipient_dict))['hex']) # send from nodes[2] using utxo to nodes[3] recipient_dict2 = { self.nodes[3].getnewaddress(): 1, self.nodes[2].getnewaddress(): change, } self.nodes[2].sendrawtransaction( self.nodes[2].signrawtransactionwithwallet( self.nodes[2].createrawtransaction(utxo_dicts, recipient_dict2))['hex']) # generate on both sides lastblockhash = self.nodes[1].generate(3)[2] self.nodes[2].generate(4) self.join_network() self.sync_all() # gettransaction should work for txid1 assert self.nodes[0].gettransaction(txid1)['txid'] == txid1, "gettransaction failed to find txid1" # listsinceblock(lastblockhash) should now include txid1, as seen from nodes[0] lsbres = self.nodes[0].listsinceblock(lastblockhash) assert any(tx['txid'] == txid1 for tx in lsbres['removed']) # but it should not include 'removed' if include_removed=false lsbres2 = self.nodes[0].listsinceblock(blockhash=lastblockhash, include_removed=False) assert 'removed' not in lsbres2 def test_double_send(self): ''' This tests the case where the same transaction is submitted twice on two separate blocks as part of a reorg. 
The former will vanish and the latter will appear as the true transaction (with confirmations dropping as a result). ab0 / \ aa1 [tx1] bb1 | | aa2 bb2 | | aa3 bb3 [tx1] | bb4 Asserted: 1. tx1 is listed in listsinceblock. 2. It is included in 'removed' as it was removed, even though it is now present in a different block. 3. It is listed with a confirmation count of 2 (bb3, bb4), not 3 (aa1, aa2, aa3). ''' self.sync_all() # Split network into two self.split_network() # create and sign a transaction utxos = self.nodes[2].listunspent() utxo = utxos[0] change = '%.8f' % (float(utxo['amount']) - 1.0003) recipient_dict = { self.nodes[0].getnewaddress(): 1, self.nodes[2].getnewaddress(): change, } utxo_dicts = [{ 'txid': utxo['txid'], 'vout': utxo['vout'], }] signedtxres = self.nodes[2].signrawtransactionwithwallet( self.nodes[2].createrawtransaction(utxo_dicts, recipient_dict)) assert signedtxres['complete'] signedtx = signedtxres['hex'] # send from nodes[1]; this will end up in aa1 txid1 = self.nodes[1].sendrawtransaction(signedtx) # generate bb1-bb2 on right side self.nodes[2].generate(2) # send from nodes[2]; this will end up in bb3 txid2 = self.nodes[2].sendrawtransaction(signedtx) assert_equal(txid1, txid2) # generate on both sides lastblockhash = self.nodes[1].generate(3)[2] self.nodes[2].generate(2) self.join_network() self.sync_all() # gettransaction should work for txid1 self.nodes[0].gettransaction(txid1) # listsinceblock(lastblockhash) should now include txid1 in transactions # as well as in removed lsbres = self.nodes[0].listsinceblock(lastblockhash) assert any(tx['txid'] == txid1 for tx in lsbres['transactions']) assert any(tx['txid'] == txid1 for tx in lsbres['removed']) # find transaction and ensure confirmations is valid for tx in lsbres['transactions']: if tx['txid'] == txid1: assert_equal(tx['confirmations'], 2) # the same check for the removed array; confirmations should STILL be 2 for tx in lsbres['removed']: if tx['txid'] == txid1: 
assert_equal(tx['confirmations'], 2) if __name__ == '__main__': ListSinceBlockTest().main()
eneldoserrata/marcos_openerp
refs/heads/master
addons/account_analytic_analysis/__openerp__.py
48
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

# Module manifest for the "Contracts Management" addon.  OpenERP evaluates
# this top-level dict when scanning the addons path; it is pure data.
{
    'name': 'Contracts Management',
    'version': '1.1',
    'category': 'Sales Management',
    # NOTE: the description is shown verbatim to users in the module list;
    # fixed missing space after "manager." in the second paragraph.
    'description': """
This module is for modifying account analytic view to show important data to project manager of services companies.
===================================================================================================================

Adds menu to show relevant information to each manager. You can also view the report of account analytic summary user-wise as well as month-wise.
""",
    'author': 'Camptocamp',
    'website': 'http://www.camptocamp.com/',
    'images': ['images/bill_tasks_works.jpeg', 'images/overpassed_accounts.jpeg'],
    # Although 'sale' is technically not required to install this module,
    # all menuitems are located under the 'Sales' application.
    'depends': ['hr_timesheet_invoice', 'sale'],
    'data': [
        'security/ir.model.access.csv',
        'security/account_analytic_analysis_security.xml',
        'account_analytic_analysis_view.xml',
        'account_analytic_analysis_cron.xml',
        'res_config_view.xml',
    ],
    'css': [
        'static/src/css/analytic.css',
    ],
    'demo': ['analytic_account_demo.xml'],
    'installable': True,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
interfect/cjdns
refs/heads/master
node_build/dependencies/libuv/build/gyp/test/actions/gyptest-errors.py
351
#!/usr/bin/env python # Copyright (c) 2009 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies behavior for different action configuration errors: exit status of 1, and the expected error message must be in stderr. """ import TestGyp test = TestGyp.TestGyp(workdir='workarea_errors') test.run_gyp('action_missing_name.gyp', chdir='src', status=1, stderr=None) expect = [ "Anonymous action in target broken_actions2. An action must have an 'action_name' field.", ] test.must_contain_all_lines(test.stderr(), expect) test.pass_test()
vijayanandnandam/youtube-dl
refs/heads/master
youtube_dl/extractor/viki.py
10
# coding: utf-8 from __future__ import unicode_literals import hashlib import hmac import itertools import json import re import time from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, parse_age_limit, parse_iso8601, sanitized_Request, ) class VikiBaseIE(InfoExtractor): _VALID_URL_BASE = r'https?://(?:www\.)?viki\.(?:com|net|mx|jp|fr)/' _API_QUERY_TEMPLATE = '/v4/%sapp=%s&t=%s&site=www.viki.com' _API_URL_TEMPLATE = 'http://api.viki.io%s&sig=%s' _APP = '100005a' _APP_VERSION = '2.2.5.1428709186' _APP_SECRET = 'MM_d*yP@`&1@]@!AVrXf_o-HVEnoTnm$O-ti4[G~$JDI/Dc-&piU&z&5.;:}95=Iad' _GEO_BYPASS = False _NETRC_MACHINE = 'viki' _token = None _ERRORS = { 'geo': 'Sorry, this content is not available in your region.', 'upcoming': 'Sorry, this content is not yet available.', # 'paywall': 'paywall', } def _prepare_call(self, path, timestamp=None, post_data=None): path += '?' if '?' not in path else '&' if not timestamp: timestamp = int(time.time()) query = self._API_QUERY_TEMPLATE % (path, self._APP, timestamp) if self._token: query += '&token=%s' % self._token sig = hmac.new( self._APP_SECRET.encode('ascii'), query.encode('ascii'), hashlib.sha1 ).hexdigest() url = self._API_URL_TEMPLATE % (query, sig) return sanitized_Request( url, json.dumps(post_data).encode('utf-8')) if post_data else url def _call_api(self, path, video_id, note, timestamp=None, post_data=None): resp = self._download_json( self._prepare_call(path, timestamp, post_data), video_id, note) error = resp.get('error') if error: if error == 'invalid timestamp': resp = self._download_json( self._prepare_call(path, int(resp['current_timestamp']), post_data), video_id, '%s (retry)' % note) error = resp.get('error') if error: self._raise_error(resp['error']) return resp def _raise_error(self, error): raise ExtractorError( '%s returned error: %s' % (self.IE_NAME, error), expected=True) def _check_errors(self, data): for reason, status in data.get('blocking', {}).items(): if status and 
reason in self._ERRORS: message = self._ERRORS[reason] if reason == 'geo': self.raise_geo_restricted(msg=message) raise ExtractorError('%s said: %s' % ( self.IE_NAME, message), expected=True) def _real_initialize(self): self._login() def _login(self): username, password = self._get_login_info() if username is None: return login_form = { 'login_id': username, 'password': password, } login = self._call_api( 'sessions.json', None, 'Logging in', post_data=login_form) self._token = login.get('token') if not self._token: self.report_warning('Unable to get session token, login has probably failed') @staticmethod def dict_selection(dict_obj, preferred_key, allow_fallback=True): if preferred_key in dict_obj: return dict_obj.get(preferred_key) if not allow_fallback: return filtered_dict = list(filter(None, [dict_obj.get(k) for k in dict_obj.keys()])) return filtered_dict[0] if filtered_dict else None class VikiIE(VikiBaseIE): IE_NAME = 'viki' _VALID_URL = r'%s(?:videos|player)/(?P<id>[0-9]+v)' % VikiBaseIE._VALID_URL_BASE _TESTS = [{ 'url': 'http://www.viki.com/videos/1023585v-heirs-episode-14', 'info_dict': { 'id': '1023585v', 'ext': 'mp4', 'title': 'Heirs Episode 14', 'uploader': 'SBS', 'description': 'md5:c4b17b9626dd4b143dcc4d855ba3474e', 'upload_date': '20131121', 'age_limit': 13, }, 'skip': 'Blocked in the US', }, { # clip 'url': 'http://www.viki.com/videos/1067139v-the-avengers-age-of-ultron-press-conference', 'md5': '86c0b5dbd4d83a6611a79987cc7a1989', 'info_dict': { 'id': '1067139v', 'ext': 'mp4', 'title': "'The Avengers: Age of Ultron' Press Conference", 'description': 'md5:d70b2f9428f5488321bfe1db10d612ea', 'duration': 352, 'timestamp': 1430380829, 'upload_date': '20150430', 'uploader': 'Arirang TV', 'like_count': int, 'age_limit': 0, } }, { 'url': 'http://www.viki.com/videos/1048879v-ankhon-dekhi', 'info_dict': { 'id': '1048879v', 'ext': 'mp4', 'title': 'Ankhon Dekhi', 'duration': 6512, 'timestamp': 1408532356, 'upload_date': '20140820', 'uploader': 'Spuul', 
'like_count': int, 'age_limit': 13, }, 'skip': 'Blocked in the US', }, { # episode 'url': 'http://www.viki.com/videos/44699v-boys-over-flowers-episode-1', 'md5': '5fa476a902e902783ac7a4d615cdbc7a', 'info_dict': { 'id': '44699v', 'ext': 'mp4', 'title': 'Boys Over Flowers - Episode 1', 'description': 'md5:b89cf50038b480b88b5b3c93589a9076', 'duration': 4204, 'timestamp': 1270496524, 'upload_date': '20100405', 'uploader': 'group8', 'like_count': int, 'age_limit': 13, } }, { # youtube external 'url': 'http://www.viki.com/videos/50562v-poor-nastya-complete-episode-1', 'md5': '63f8600c1da6f01b7640eee7eca4f1da', 'info_dict': { 'id': '50562v', 'ext': 'webm', 'title': 'Poor Nastya [COMPLETE] - Episode 1', 'description': '', 'duration': 606, 'timestamp': 1274949505, 'upload_date': '20101213', 'uploader': 'ad14065n', 'uploader_id': 'ad14065n', 'like_count': int, 'age_limit': 13, } }, { 'url': 'http://www.viki.com/player/44699v', 'only_matching': True, }, { # non-English description 'url': 'http://www.viki.com/videos/158036v-love-in-magic', 'md5': '1713ae35df5a521b31f6dc40730e7c9c', 'info_dict': { 'id': '158036v', 'ext': 'mp4', 'uploader': 'I Planet Entertainment', 'upload_date': '20111122', 'timestamp': 1321985454, 'description': 'md5:44b1e46619df3a072294645c770cef36', 'title': 'Love In Magic', 'age_limit': 13, }, }] def _real_extract(self, url): video_id = self._match_id(url) video = self._call_api( 'videos/%s.json' % video_id, video_id, 'Downloading video JSON') self._check_errors(video) title = self.dict_selection(video.get('titles', {}), 'en', allow_fallback=False) if not title: title = 'Episode %d' % video.get('number') if video.get('type') == 'episode' else video.get('id') or video_id container_titles = video.get('container', {}).get('titles', {}) container_title = self.dict_selection(container_titles, 'en') title = '%s - %s' % (container_title, title) description = self.dict_selection(video.get('descriptions', {}), 'en') duration = int_or_none(video.get('duration')) 
timestamp = parse_iso8601(video.get('created_at')) uploader = video.get('author') like_count = int_or_none(video.get('likes', {}).get('count')) age_limit = parse_age_limit(video.get('rating')) thumbnails = [] for thumbnail_id, thumbnail in video.get('images', {}).items(): thumbnails.append({ 'id': thumbnail_id, 'url': thumbnail.get('url'), }) subtitles = {} for subtitle_lang, _ in video.get('subtitle_completions', {}).items(): subtitles[subtitle_lang] = [{ 'ext': subtitles_format, 'url': self._prepare_call( 'videos/%s/subtitles/%s.%s' % (video_id, subtitle_lang, subtitles_format)), } for subtitles_format in ('srt', 'vtt')] result = { 'id': video_id, 'title': title, 'description': description, 'duration': duration, 'timestamp': timestamp, 'uploader': uploader, 'like_count': like_count, 'age_limit': age_limit, 'thumbnails': thumbnails, 'subtitles': subtitles, } streams = self._call_api( 'videos/%s/streams.json' % video_id, video_id, 'Downloading video streams JSON') if 'external' in streams: result.update({ '_type': 'url_transparent', 'url': streams['external']['url'], }) return result formats = [] for format_id, stream_dict in streams.items(): height = int_or_none(self._search_regex( r'^(\d+)[pP]$', format_id, 'height', default=None)) for protocol, format_dict in stream_dict.items(): # rtmps URLs does not seem to work if protocol == 'rtmps': continue format_url = format_dict['url'] if format_id == 'm3u8': m3u8_formats = self._extract_m3u8_formats( format_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='m3u8-%s' % protocol, fatal=False) # Despite CODECS metadata in m3u8 all video-only formats # are actually video+audio for f in m3u8_formats: if f.get('acodec') == 'none' and f.get('vcodec') != 'none': f['acodec'] = None formats.extend(m3u8_formats) elif format_url.startswith('rtmp'): mobj = re.search( r'^(?P<url>rtmp://[^/]+/(?P<app>.+?))/(?P<playpath>mp4:.+)$', format_url) if not mobj: continue formats.append({ 'format_id': 'rtmp-%s' % format_id, 'ext': 
'flv', 'url': mobj.group('url'), 'play_path': mobj.group('playpath'), 'app': mobj.group('app'), 'page_url': url, }) else: formats.append({ 'url': format_url, 'format_id': '%s-%s' % (format_id, protocol), 'height': height, }) self._sort_formats(formats) result['formats'] = formats return result class VikiChannelIE(VikiBaseIE): IE_NAME = 'viki:channel' _VALID_URL = r'%s(?:tv|news|movies|artists)/(?P<id>[0-9]+c)' % VikiBaseIE._VALID_URL_BASE _TESTS = [{ 'url': 'http://www.viki.com/tv/50c-boys-over-flowers', 'info_dict': { 'id': '50c', 'title': 'Boys Over Flowers', 'description': 'md5:ecd3cff47967fe193cff37c0bec52790', }, 'playlist_mincount': 71, }, { 'url': 'http://www.viki.com/tv/1354c-poor-nastya-complete', 'info_dict': { 'id': '1354c', 'title': 'Poor Nastya [COMPLETE]', 'description': 'md5:05bf5471385aa8b21c18ad450e350525', }, 'playlist_count': 127, }, { 'url': 'http://www.viki.com/news/24569c-showbiz-korea', 'only_matching': True, }, { 'url': 'http://www.viki.com/movies/22047c-pride-and-prejudice-2005', 'only_matching': True, }, { 'url': 'http://www.viki.com/artists/2141c-shinee', 'only_matching': True, }] _PER_PAGE = 25 def _real_extract(self, url): channel_id = self._match_id(url) channel = self._call_api( 'containers/%s.json' % channel_id, channel_id, 'Downloading channel JSON') self._check_errors(channel) title = self.dict_selection(channel['titles'], 'en') description = self.dict_selection(channel['descriptions'], 'en') entries = [] for video_type in ('episodes', 'clips', 'movies'): for page_num in itertools.count(1): page = self._call_api( 'containers/%s/%s.json?per_page=%d&sort=number&direction=asc&with_paging=true&page=%d' % (channel_id, video_type, self._PER_PAGE, page_num), channel_id, 'Downloading %s JSON page #%d' % (video_type, page_num)) for video in page['response']: video_id = video['id'] entries.append(self.url_result( 'http://www.viki.com/videos/%s' % video_id, 'Viki')) if not page['pagination']['next']: break return self.playlist_result(entries, 
channel_id, title, description)
kernc/networkx
refs/heads/master
examples/pygraphviz/write_dotfile.py
25
#!/usr/bin/env python
"""
Write a dot file from a networkx graph for further processing with graphviz.

You need to have either pygraphviz or pydot for this example.

See http://networkx.github.io/documentation/latest/reference/drawing.html
for more info.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
#    Copyright (C) 2004-2015 by
#    Aric Hagberg <hagberg@lanl.gov>
#    Dan Schult <dschult@colgate.edu>
#    Pieter Swart <swart@lanl.gov>
#    All rights reserved.
#    BSD license.

import networkx as NX

# This block is purely diagnostic: it reports which graphviz backend module
# (pygraphviz or pydot) networkx bound write_dot to, and on failure prints a
# pointer to the docs before re-raising the original error.
try:
    m = NX.drawing.write_dot.__module__
except Exception:
    # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit are
    # not intercepted; the original exception is re-raised unchanged below.
    print()
    print("pygraphviz or pydot were not found ")
    print("see http://networkx.github.io/documentation/latest/reference/drawing.html for info")
    print()
    raise
print("using module", m)

G = NX.grid_2d_graph(5, 5)  # 5x5 grid
NX.write_dot(G, "grid.dot")
print("Now run: neato -Tps grid.dot >grid.ps")
t0mk/ansible
refs/heads/devel
lib/ansible/modules/cloud/openstack/_nova_keypair.py
21
#!/usr/bin/python #coding: utf-8 -*- # (c) 2013, Benno Joy <benno@ansible.com> # (c) 2013, John Dewey <john@dewey.ws> # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'status': ['deprecated'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = ''' --- module: nova_keypair version_added: "1.2" author: - "Benno Joy (@bennojoy)" - "Michael DeHaan" deprecated: Deprecated in 2.0. Use M(os_keypair) instead. short_description: Add/Delete key pair from nova description: - Add or Remove key pair from nova . 
options: login_username: description: - login username to authenticate to keystone required: true default: admin login_password: description: - Password of login user required: true default: 'yes' login_tenant_name: description: - The tenant name of the login user required: true default: 'yes' auth_url: description: - The keystone url for authentication required: false default: http://127.0.0.1:35357/v2.0/ region_name: description: - Name of the region required: false default: None state: description: - Indicate desired state of the resource choices: ['present', 'absent'] default: present name: description: - Name that has to be given to the key pair required: true default: None public_key: description: - The public key that would be uploaded to nova and injected to vm's upon creation required: false default: None requirements: - "python >= 2.6" - "python-novaclient" ''' EXAMPLES = ''' - name: Create a key pair with the running users public key nova_keypair: state: present login_username: admin login_password: admin login_tenant_name: admin name: ansible_key public_key: "{{ lookup('file','~/.ssh/id_rsa.pub') }}" - name: Create a new key pair and the private key returned after the run. 
nova_keypair: state: present login_username: admin login_password: admin login_tenant_name: admin name: ansible_key ''' import time try: from novaclient.v1_1 import client as nova_client from novaclient import exceptions as exc HAS_NOVACLIENT = True except ImportError: HAS_NOVACLIENT = False def main(): argument_spec = openstack_argument_spec() argument_spec.update(dict( name = dict(required=True), public_key = dict(default=None), state = dict(default='present', choices=['absent', 'present']) )) module = AnsibleModule(argument_spec=argument_spec) if not HAS_NOVACLIENT: module.fail_json(msg='python-novaclient is required for this module to work') nova = nova_client.Client(module.params['login_username'], module.params['login_password'], module.params['login_tenant_name'], module.params['auth_url'], region_name=module.params['region_name'], service_type='compute') try: nova.authenticate() except exc.Unauthorized as e: module.fail_json(msg = "Invalid OpenStack Nova credentials.: %s" % e.message) except exc.AuthorizationFailure as e: module.fail_json(msg = "Unable to authorize user: %s" % e.message) if module.params['state'] == 'present': for key in nova.keypairs.list(): if key.name == module.params['name']: if module.params['public_key'] and (module.params['public_key'] != key.public_key ): module.fail_json(msg = "name {} present but key hash not the same as offered. 
Delete key first.".format(key['name'])) else: module.exit_json(changed = False, result = "Key present") try: key = nova.keypairs.create(module.params['name'], module.params['public_key']) except Exception as e: module.exit_json(msg = "Error in creating the keypair: %s" % e.message) if not module.params['public_key']: module.exit_json(changed = True, key = key.private_key) module.exit_json(changed = True, key = None) if module.params['state'] == 'absent': for key in nova.keypairs.list(): if key.name == module.params['name']: try: nova.keypairs.delete(module.params['name']) except Exception as e: module.fail_json(msg = "The keypair deletion has failed: %s" % e.message) module.exit_json( changed = True, result = "deleted") module.exit_json(changed = False, result = "not present") # this is magic, see lib/ansible/module.params['common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * if __name__ == '__main__': main()
welex91/ansible-modules-core
refs/heads/devel
system/selinux.py
198
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2012, Derek Carter<goozbach@friocorte.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: selinux short_description: Change policy and state of SELinux description: - Configures the SELinux mode and policy. A reboot may be required after usage. Ansible will not issue this reboot but will let you know when it is required. 
version_added: "0.7" options: policy: description: - "name of the SELinux policy to use (example: C(targeted)) will be required if state is not C(disabled)" required: false default: null state: description: - The SELinux mode required: true default: null choices: [ "enforcing", "permissive", "disabled" ] conf: description: - path to the SELinux configuration file, if non-standard required: false default: "/etc/selinux/config" notes: - Not tested on any debian based system requirements: [ libselinux-python ] author: "Derek Carter (@goozbach) <goozbach@friocorte.com>" ''' EXAMPLES = ''' - selinux: policy=targeted state=enforcing - selinux: policy=targeted state=permissive - selinux: state=disabled ''' import os import re import sys try: import selinux HAS_SELINUX = True except ImportError: HAS_SELINUX = False # getter subroutines def get_config_state(configfile): myfile = open(configfile, "r") lines = myfile.readlines() myfile.close() for line in lines: stateline = re.match('^SELINUX=.*$', line) if (stateline): return(line.split('=')[1].strip()) def get_config_policy(configfile): myfile = open(configfile, "r") lines = myfile.readlines() myfile.close() for line in lines: stateline = re.match('^SELINUXTYPE=.*$', line) if (stateline): return(line.split('=')[1].strip()) # setter subroutines def set_config_state(state, configfile): #SELINUX=permissive # edit config file with state value stateline='SELINUX=%s' % state myfile = open(configfile, "r") lines = myfile.readlines() myfile.close() myfile = open(configfile, "w") for line in lines: myfile.write(re.sub(r'^SELINUX=.*', stateline, line)) myfile.close() def set_state(state): if (state == 'enforcing'): selinux.security_setenforce(1) elif (state == 'permissive'): selinux.security_setenforce(0) elif (state == 'disabled'): pass else: msg = 'trying to set invalid runtime state %s' % state module.fail_json(msg=msg) def set_config_policy(policy, configfile): # edit config file with state value #SELINUXTYPE=targeted 
policyline='SELINUXTYPE=%s' % policy myfile = open(configfile, "r") lines = myfile.readlines() myfile.close() myfile = open(configfile, "w") for line in lines: myfile.write(re.sub(r'^SELINUXTYPE=.*', policyline, line)) myfile.close() def main(): module = AnsibleModule( argument_spec = dict( policy=dict(required=False), state=dict(choices=['enforcing', 'permissive', 'disabled'], required=True), configfile=dict(aliases=['conf','file'], default='/etc/selinux/config') ), supports_check_mode=True ) if not HAS_SELINUX: module.fail_json(msg='libselinux-python required for this module') # global vars changed=False msgs = [] configfile = module.params['configfile'] policy = module.params['policy'] state = module.params['state'] runtime_enabled = selinux.is_selinux_enabled() runtime_policy = selinux.selinux_getpolicytype()[1] runtime_state = 'disabled' if (runtime_enabled): # enabled means 'enforcing' or 'permissive' if (selinux.security_getenforce()): runtime_state = 'enforcing' else: runtime_state = 'permissive' config_policy = get_config_policy(configfile) config_state = get_config_state(configfile) # check to see if policy is set if state is not 'disabled' if (state != 'disabled'): if not policy: module.fail_json(msg='policy is required if state is not \'disabled\'') else: if not policy: policy = config_policy # check changed values and run changes if (policy != runtime_policy): if module.check_mode: module.exit_json(changed=True) # cannot change runtime policy msgs.append('reboot to change the loaded policy') changed=True if (policy != config_policy): if module.check_mode: module.exit_json(changed=True) msgs.append('config policy changed from \'%s\' to \'%s\'' % (config_policy, policy)) set_config_policy(policy, configfile) changed=True if (state != runtime_state): if module.check_mode: module.exit_json(changed=True) if (runtime_enabled): if (state == 'disabled'): if (runtime_state != 'permissive'): # Temporarily set state to permissive set_state('permissive') 
msgs.append('runtime state temporarily changed from \'%s\' to \'permissive\', state change will take effect next reboot' % (runtime_state)) else: msgs.append('state change will take effect next reboot') else: set_state(state) msgs.append('runtime state changed from \'%s\' to \'%s\'' % (runtime_state, state)) else: msgs.append('state change will take effect next reboot') changed=True if (state != config_state): if module.check_mode: module.exit_json(changed=True) msgs.append('config state changed from \'%s\' to \'%s\'' % (config_state, state)) set_config_state(state, configfile) changed=True module.exit_json(changed=changed, msg=', '.join(msgs), configfile=configfile, policy=policy, state=state) ################################################# # import module snippets from ansible.module_utils.basic import * if __name__ == '__main__': main()
theguardian/headphones
refs/heads/master
lib/httplib2/__init__.py
24
from __future__ import generators """ httplib2 A caching http interface that supports ETags and gzip to conserve bandwidth. Requires Python 2.3 or later Changelog: 2007-08-18, Rick: Modified so it's able to use a socks proxy if needed. """ __author__ = "Joe Gregorio (joe@bitworking.org)" __copyright__ = "Copyright 2006, Joe Gregorio" __contributors__ = ["Thomas Broyer (t.broyer@ltgt.net)", "James Antill", "Xavier Verges Farrero", "Jonathan Feinberg", "Blair Zajac", "Sam Ruby", "Louis Nyffenegger"] __license__ = "MIT" __version__ = "$Rev$" import re import sys import email import email.Utils import email.Message import email.FeedParser import StringIO import gzip import zlib import httplib import urlparse import base64 import os import copy import calendar import time import random import errno # remove depracated warning in python2.6 try: from hashlib import sha1 as _sha, md5 as _md5 except ImportError: import sha import md5 _sha = sha.new _md5 = md5.new import hmac from gettext import gettext as _ import socket # Try using local version, followed by system, and none if neither are found try: import socks as socks except ImportError: try: import socks as socks except ImportError: socks = None # Build the appropriate socket wrapper for ssl try: import ssl # python 2.6 _ssl_wrap_socket = ssl.wrap_socket except ImportError: def _ssl_wrap_socket(sock, key_file, cert_file): ssl_sock = socket.ssl(sock, key_file, cert_file) return httplib.FakeSocket(sock, ssl_sock) if sys.version_info >= (2,3): from iri2uri import iri2uri else: def iri2uri(uri): return uri def has_timeout(timeout): # python 2.6 if hasattr(socket, '_GLOBAL_DEFAULT_TIMEOUT'): return (timeout is not None and timeout is not socket._GLOBAL_DEFAULT_TIMEOUT) return (timeout is not None) __all__ = ['Http', 'Response', 'ProxyInfo', 'HttpLib2Error', 'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent', 'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError', 'debuglevel'] # 
The httplib debug level, set to a non-zero value to get debug output debuglevel = 0 # Python 2.3 support if sys.version_info < (2,4): def sorted(seq): seq.sort() return seq # Python 2.3 support def HTTPResponse__getheaders(self): """Return list of (header, value) tuples.""" if self.msg is None: raise httplib.ResponseNotReady() return self.msg.items() if not hasattr(httplib.HTTPResponse, 'getheaders'): httplib.HTTPResponse.getheaders = HTTPResponse__getheaders # All exceptions raised here derive from HttpLib2Error class HttpLib2Error(Exception): pass # Some exceptions can be caught and optionally # be turned back into responses. class HttpLib2ErrorWithResponse(HttpLib2Error): def __init__(self, desc, response, content): self.response = response self.content = content HttpLib2Error.__init__(self, desc) class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass class RedirectLimit(HttpLib2ErrorWithResponse): pass class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass class RelativeURIError(HttpLib2Error): pass class ServerNotFoundError(HttpLib2Error): pass # Open Items: # ----------- # Proxy support # Are we removing the cached content too soon on PUT (only delete on 200 Maybe?) # Pluggable cache storage (supports storing the cache in # flat files by default. We need a plug-in architecture # that can support Berkeley DB and Squid) # == Known Issues == # Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator. # Does not handle Cache-Control: max-stale # Does not use Age: headers when calculating cache freshness. # The number of redirections to follow before giving up. # Note that only GET redirects are automatically followed. # Will also honor 301 requests by saving that info and never # requesting that URI again. 
DEFAULT_MAX_REDIRECTS = 5 # Which headers are hop-by-hop headers by default HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade'] def _get_end2end_headers(response): hopbyhop = list(HOP_BY_HOP) hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')]) return [header for header in response.keys() if header not in hopbyhop] URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?") def parse_uri(uri): """Parses a URI using the regex given in Appendix B of RFC 3986. (scheme, authority, path, query, fragment) = parse_uri(uri) """ groups = URI.match(uri).groups() return (groups[1], groups[3], groups[4], groups[6], groups[8]) def urlnorm(uri): (scheme, authority, path, query, fragment) = parse_uri(uri) if not scheme or not authority: raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri) authority = authority.lower() scheme = scheme.lower() if not path: path = "/" # Could do syntax based normalization of the URI before # computing the digest. See Section 6.2.2 of Std 66. request_uri = query and "?".join([path, query]) or path scheme = scheme.lower() defrag_uri = scheme + "://" + authority + request_uri return scheme, authority, request_uri, defrag_uri # Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/) re_url_scheme = re.compile(r'^\w+://') re_slash = re.compile(r'[?/:|]+') def safename(filename): """Return a filename suitable for the cache. Strips dangerous and common characters to create a filename we can use to store the cache in. 
""" try: if re_url_scheme.match(filename): if isinstance(filename,str): filename = filename.decode('utf-8') filename = filename.encode('idna') else: filename = filename.encode('idna') except UnicodeError: pass if isinstance(filename,unicode): filename=filename.encode('utf-8') filemd5 = _md5(filename).hexdigest() filename = re_url_scheme.sub("", filename) filename = re_slash.sub(",", filename) # limit length of filename if len(filename)>200: filename=filename[:200] return ",".join((filename, filemd5)) NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+') def _normalize_headers(headers): return dict([ (key.lower(), NORMALIZE_SPACE.sub(value, ' ').strip()) for (key, value) in headers.iteritems()]) def _parse_cache_control(headers): retval = {} if headers.has_key('cache-control'): parts = headers['cache-control'].split(',') parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")] parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")] retval = dict(parts_with_args + parts_wo_args) return retval # Whether to use a strict mode to parse WWW-Authenticate headers # Might lead to bad results in case of ill-formed header value, # so disabled by default, falling back to relaxed parsing. # Set to true to turn on, usefull for testing servers. USE_WWW_AUTH_STRICT_PARSING = 0 # In regex below: # [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP # "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space # Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both: # \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"? 
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$") WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$") UNQUOTE_PAIRS = re.compile(r'\\(.)') def _parse_www_authenticate(headers, headername='www-authenticate'): """Returns a dictionary of dictionaries, one dict per auth_scheme.""" retval = {} if headers.has_key(headername): authenticate = headers[headername].strip() www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED while authenticate: # Break off the scheme at the beginning of the line if headername == 'authentication-info': (auth_scheme, the_rest) = ('digest', authenticate) else: (auth_scheme, the_rest) = authenticate.split(" ", 1) # Now loop over all the key value pairs that come after the scheme, # being careful not to roll into the next scheme match = www_auth.search(the_rest) auth_params = {} while match: if match and len(match.groups()) == 3: (key, value, the_rest) = match.groups() auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')]) match = www_auth.search(the_rest) retval[auth_scheme.lower()] = auth_params authenticate = the_rest.strip() return retval def _entry_disposition(response_headers, request_headers): """Determine freshness from the Date, Expires and Cache-Control headers. We don't handle the following: 1. Cache-Control: max-stale 2. Age: headers are not used in the calculations. Not that this algorithm is simpler than you might think because we are operating as a private (non-shared) cache. This lets us ignore 's-maxage'. We can also ignore 'proxy-invalidate' since we aren't a proxy. 
We will never return a stale document as fresh as a design decision, and thus the non-implementation of 'max-stale'. This also lets us safely ignore 'must-revalidate' since we operate as if every server has sent 'must-revalidate'. Since we are private we get to ignore both 'public' and 'private' parameters. We also ignore 'no-transform' since we don't do any transformations. The 'no-store' parameter is handled at a higher level. So the only Cache-Control parameters we look at are: no-cache only-if-cached max-age min-fresh """ retval = "STALE" cc = _parse_cache_control(request_headers) cc_response = _parse_cache_control(response_headers) if request_headers.has_key('pragma') and request_headers['pragma'].lower().find('no-cache') != -1: retval = "TRANSPARENT" if 'cache-control' not in request_headers: request_headers['cache-control'] = 'no-cache' elif cc.has_key('no-cache'): retval = "TRANSPARENT" elif cc_response.has_key('no-cache'): retval = "STALE" elif cc.has_key('only-if-cached'): retval = "FRESH" elif response_headers.has_key('date'): date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date'])) now = time.time() current_age = max(0, now - date) if cc_response.has_key('max-age'): try: freshness_lifetime = int(cc_response['max-age']) except ValueError: freshness_lifetime = 0 elif response_headers.has_key('expires'): expires = email.Utils.parsedate_tz(response_headers['expires']) if None == expires: freshness_lifetime = 0 else: freshness_lifetime = max(0, calendar.timegm(expires) - date) else: freshness_lifetime = 0 if cc.has_key('max-age'): try: freshness_lifetime = int(cc['max-age']) except ValueError: freshness_lifetime = 0 if cc.has_key('min-fresh'): try: min_fresh = int(cc['min-fresh']) except ValueError: min_fresh = 0 current_age += min_fresh if freshness_lifetime > current_age: retval = "FRESH" return retval def _decompressContent(response, new_content): content = new_content try: encoding = response.get('content-encoding', None) if encoding in 
['gzip', 'deflate']: if encoding == 'gzip': content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read() if encoding == 'deflate': content = zlib.decompress(content) response['content-length'] = str(len(content)) # Record the historical presence of the encoding in a way the won't interfere. response['-content-encoding'] = response['content-encoding'] del response['content-encoding'] except IOError: content = "" raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content) return content def _updateCache(request_headers, response_headers, content, cache, cachekey): if cachekey: cc = _parse_cache_control(request_headers) cc_response = _parse_cache_control(response_headers) if cc.has_key('no-store') or cc_response.has_key('no-store'): cache.delete(cachekey) else: info = email.Message.Message() for key, value in response_headers.iteritems(): if key not in ['status','content-encoding','transfer-encoding']: info[key] = value # Add annotations to the cache to indicate what headers # are variant for this request. 
vary = response_headers.get('vary', None) if vary: vary_headers = vary.lower().replace(' ', '').split(',') for header in vary_headers: key = '-varied-%s' % header try: info[key] = request_headers[header] except KeyError: pass status = response_headers.status if status == 304: status = 200 status_header = 'status: %d\r\n' % response_headers.status header_str = info.as_string() header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str) text = "".join([status_header, header_str, content]) cache.set(cachekey, text) def _cnonce(): dig = _md5("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])).hexdigest() return dig[:16] def _wsse_username_token(cnonce, iso_now, password): return base64.b64encode(_sha("%s%s%s" % (cnonce, iso_now, password)).digest()).strip() # For credentials we need two things, first # a pool of credential to try (not necesarily tied to BAsic, Digest, etc.) # Then we also need a list of URIs that have already demanded authentication # That list is tricky since sub-URIs can take the same auth, or the # auth scheme may change as you descend the tree. # So we also need each Auth instance to be able to tell us # how close to the 'top' it is. class Authentication(object): def __init__(self, credentials, host, request_uri, headers, response, content, http): (scheme, authority, path, query, fragment) = parse_uri(request_uri) self.path = path self.host = host self.credentials = credentials self.http = http def depth(self, request_uri): (scheme, authority, path, query, fragment) = parse_uri(request_uri) return request_uri[len(self.path):].count("/") def inscope(self, host, request_uri): # XXX Should we normalize the request_uri? (scheme, authority, path, query, fragment) = parse_uri(request_uri) return (host == self.host) and path.startswith(self.path) def request(self, method, request_uri, headers, content): """Modify the request headers to add the appropriate Authorization header. 
Over-rise this in sub-classes.""" pass def response(self, response, content): """Gives us a chance to update with new nonces or such returned from the last authorized response. Over-rise this in sub-classes if necessary. Return TRUE is the request is to be retried, for example Digest may return stale=true. """ return False class BasicAuthentication(Authentication): def __init__(self, credentials, host, request_uri, headers, response, content, http): Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) def request(self, method, request_uri, headers, content): """Modify the request headers to add the appropriate Authorization header.""" headers['authorization'] = 'Basic ' + base64.b64encode("%s:%s" % self.credentials).strip() class DigestAuthentication(Authentication): """Only do qop='auth' and MD5, since that is all Apache currently implements""" def __init__(self, credentials, host, request_uri, headers, response, content, http): Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) challenge = _parse_www_authenticate(response, 'www-authenticate') self.challenge = challenge['digest'] qop = self.challenge.get('qop', 'auth') self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None if self.challenge['qop'] is None: raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop)) self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper() if self.challenge['algorithm'] != 'MD5': raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." 
% self.challenge['algorithm'])) self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]]) self.challenge['nc'] = 1 def request(self, method, request_uri, headers, content, cnonce = None): """Modify the request headers""" H = lambda x: _md5(x).hexdigest() KD = lambda s, d: H("%s:%s" % (s, d)) A2 = "".join([method, ":", request_uri]) self.challenge['cnonce'] = cnonce or _cnonce() request_digest = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (self.challenge['nonce'], '%08x' % self.challenge['nc'], self.challenge['cnonce'], self.challenge['qop'], H(A2) )) headers['Authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % ( self.credentials[0], self.challenge['realm'], self.challenge['nonce'], request_uri, self.challenge['algorithm'], request_digest, self.challenge['qop'], self.challenge['nc'], self.challenge['cnonce'], ) self.challenge['nc'] += 1 def response(self, response, content): if not response.has_key('authentication-info'): challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {}) if 'true' == challenge.get('stale'): self.challenge['nonce'] = challenge['nonce'] self.challenge['nc'] = 1 return True else: updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {}) if updated_challenge.has_key('nextnonce'): self.challenge['nonce'] = updated_challenge['nextnonce'] self.challenge['nc'] = 1 return False class HmacDigestAuthentication(Authentication): """Adapted from Robert Sayre's code and DigestAuthentication above.""" __author__ = "Thomas Broyer (t.broyer@ltgt.net)" def __init__(self, credentials, host, request_uri, headers, response, content, http): Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) challenge = _parse_www_authenticate(response, 'www-authenticate') self.challenge = challenge['hmacdigest'] # TODO: self.challenge['domain'] 
self.challenge['reason'] = self.challenge.get('reason', 'unauthorized') if self.challenge['reason'] not in ['unauthorized', 'integrity']: self.challenge['reason'] = 'unauthorized' self.challenge['salt'] = self.challenge.get('salt', '') if not self.challenge.get('snonce'): raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty.")) self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1') if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']: raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm'])) self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1') if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']: raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." % self.challenge['pw-algorithm'])) if self.challenge['algorithm'] == 'HMAC-MD5': self.hashmod = _md5 else: self.hashmod = _sha if self.challenge['pw-algorithm'] == 'MD5': self.pwhashmod = _md5 else: self.pwhashmod = _sha self.key = "".join([self.credentials[0], ":", self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(), ":", self.challenge['realm'] ]) self.key = self.pwhashmod.new(self.key).hexdigest().lower() def request(self, method, request_uri, headers, content): """Modify the request headers""" keys = _get_end2end_headers(headers) keylist = "".join(["%s " % k for k in keys]) headers_val = "".join([headers[k] for k in keys]) created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime()) cnonce = _cnonce() request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val) request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower() headers['Authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % ( self.credentials[0], 
self.challenge['realm'], self.challenge['snonce'], cnonce, request_uri, created, request_digest, keylist, ) def response(self, response, content): challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {}) if challenge.get('reason') in ['integrity', 'stale']: return True return False class WsseAuthentication(Authentication): """This is thinly tested and should not be relied upon. At this time there isn't any third party server to test against. Blogger and TypePad implemented this algorithm at one point but Blogger has since switched to Basic over HTTPS and TypePad has implemented it wrong, by never issuing a 401 challenge but instead requiring your client to telepathically know that their endpoint is expecting WSSE profile="UsernameToken".""" def __init__(self, credentials, host, request_uri, headers, response, content, http): Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) def request(self, method, request_uri, headers, content): """Modify the request headers to add the appropriate Authorization header.""" headers['Authorization'] = 'WSSE profile="UsernameToken"' iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) cnonce = _cnonce() password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1]) headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % ( self.credentials[0], password_digest, cnonce, iso_now) class GoogleLoginAuthentication(Authentication): def __init__(self, credentials, host, request_uri, headers, response, content, http): from urllib import urlencode Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) challenge = _parse_www_authenticate(response, 'www-authenticate') service = challenge['googlelogin'].get('service', 'xapi') # Bloggger actually returns the service in the challenge # For the rest we guess based on the URI if service == 'xapi' and 
request_uri.find("calendar") > 0: service = "cl" # No point in guessing Base or Spreadsheet #elif request_uri.find("spreadsheets") > 0: # service = "wise" auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent']) resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'}) lines = content.split('\n') d = dict([tuple(line.split("=", 1)) for line in lines if line]) if resp.status == 403: self.Auth = "" else: self.Auth = d['Auth'] def request(self, method, request_uri, headers, content): """Modify the request headers to add the appropriate Authorization header.""" headers['authorization'] = 'GoogleLogin Auth=' + self.Auth AUTH_SCHEME_CLASSES = { "basic": BasicAuthentication, "wsse": WsseAuthentication, "digest": DigestAuthentication, "hmacdigest": HmacDigestAuthentication, "googlelogin": GoogleLoginAuthentication } AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"] class FileCache(object): """Uses a local directory as a store for cached files. Not really safe to use if multiple threads or processes are going to be running on the same cache. 
""" def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior self.cache = cache self.safe = safe if not os.path.exists(cache): os.makedirs(self.cache) def get(self, key): retval = None cacheFullPath = os.path.join(self.cache, self.safe(key)) try: f = file(cacheFullPath, "rb") retval = f.read() f.close() except IOError, e: pass return retval def set(self, key, value): cacheFullPath = os.path.join(self.cache, self.safe(key)) f = file(cacheFullPath, "wb") f.write(value) f.close() def delete(self, key): cacheFullPath = os.path.join(self.cache, self.safe(key)) if os.path.exists(cacheFullPath): os.remove(cacheFullPath) class Credentials(object): def __init__(self): self.credentials = [] def add(self, name, password, domain=""): self.credentials.append((domain.lower(), name, password)) def clear(self): self.credentials = [] def iter(self, domain): for (cdomain, name, password) in self.credentials: if cdomain == "" or domain == cdomain: yield (name, password) class KeyCerts(Credentials): """Identical to Credentials except that name/password are mapped to key/cert.""" pass class ProxyInfo(object): """Collect information required to use a proxy.""" def __init__(self, proxy_type, proxy_host, proxy_port, proxy_rdns=None, proxy_user=None, proxy_pass=None): """The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX constants. 
For example: p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', proxy_port=8000) """ self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns, self.proxy_user, self.proxy_pass = proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass def astuple(self): return (self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns, self.proxy_user, self.proxy_pass) def isgood(self): return socks and (self.proxy_host != None) and (self.proxy_port != None) class HTTPConnectionWithTimeout(httplib.HTTPConnection): """HTTPConnection subclass that supports timeouts""" def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None): httplib.HTTPConnection.__init__(self, host, port, strict) self.timeout = timeout self.proxy_info = proxy_info def connect(self): """Connect to the host and port specified in __init__.""" # Mostly verbatim from httplib.py. msg = "getaddrinfo returns an empty list" for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM): af, socktype, proto, canonname, sa = res try: if self.proxy_info and self.proxy_info.isgood(): self.sock = socks.socksocket(af, socktype, proto) self.sock.setproxy(*self.proxy_info.astuple()) else: self.sock = socket.socket(af, socktype, proto) # Different from httplib: support timeouts. if has_timeout(self.timeout): self.sock.settimeout(self.timeout) # End of difference from httplib. if self.debuglevel > 0: print "connect: (%s, %s)" % (self.host, self.port) self.sock.connect(sa) except socket.error, msg: if self.debuglevel > 0: print 'connect fail:', (self.host, self.port) if self.sock: self.sock.close() self.sock = None continue break if not self.sock: raise socket.error, msg class HTTPSConnectionWithTimeout(httplib.HTTPSConnection): "This class allows communication via SSL." 
def __init__(self, host, port=None, key_file=None, cert_file=None, strict=None, timeout=None, proxy_info=None): httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file, cert_file=cert_file, strict=strict) self.timeout = timeout self.proxy_info = proxy_info def connect(self): "Connect to a host on a given (SSL) port." if self.proxy_info and self.proxy_info.isgood(): sock = socks.socksocket(socket.AF_INET, socket.SOCK_STREAM) sock.setproxy(*self.proxy_info.astuple()) else: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if has_timeout(self.timeout): sock.settimeout(self.timeout) sock.connect((self.host, self.port)) self.sock =_ssl_wrap_socket(sock, self.key_file, self.cert_file) class Http(object): """An HTTP client that handles: - all methods - caching - ETags - compression, - HTTPS - Basic - Digest - WSSE and more. """ def __init__(self, cache=None, timeout=None, proxy_info=None): """The value of proxy_info is a ProxyInfo instance. If 'cache' is a string then it is used as a directory name for a disk cache. Otherwise it must be an object that supports the same interface as FileCache.""" self.proxy_info = proxy_info # Map domain name to an httplib connection self.connections = {} # The location of the cache, for now a directory # where cached responses are held. if cache and isinstance(cache, str): self.cache = FileCache(cache) else: self.cache = cache # Name/password self.credentials = Credentials() # Key/cert self.certificates = KeyCerts() # authorization objects self.authorizations = [] # If set to False then no redirects are followed, even safe ones. self.follow_redirects = True # Which HTTP methods do we apply optimistic concurrency to, i.e. # which methods get an "if-match:" etag header added to them. self.optimistic_concurrency_methods = ["PUT"] # If 'follow_redirects' is True, and this is set to True then # all redirecs are followed, including unsafe ones. 
self.follow_all_redirects = False self.ignore_etag = False self.force_exception_to_status_code = False self.timeout = timeout def _auth_from_challenge(self, host, request_uri, headers, response, content): """A generator that creates Authorization objects that can be applied to requests. """ challenges = _parse_www_authenticate(response, 'www-authenticate') for cred in self.credentials.iter(host): for scheme in AUTH_SCHEME_ORDER: if challenges.has_key(scheme): yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self) def add_credentials(self, name, password, domain=""): """Add a name and password that will be used any time a request requires authentication.""" self.credentials.add(name, password, domain) def add_certificate(self, key, cert, domain): """Add a key and cert that will be used any time a request requires authentication.""" self.certificates.add(key, cert, domain) def clear_credentials(self): """Remove all the names and passwords that are used for authentication""" self.credentials.clear() self.authorizations = [] def _conn_request(self, conn, request_uri, method, body, headers): for i in range(2): try: conn.request(method, request_uri, body, headers) except socket.gaierror: conn.close() raise ServerNotFoundError("Unable to find the server at %s" % conn.host) except socket.error, e: if not hasattr(e, 'errno'): # I don't know what this is so lets raise it if it happens raise elif e.errno == errno.ECONNREFUSED: # Connection refused raise # Just because the server closed the connection doesn't apparently mean # that the server didn't send a response. pass except httplib.HTTPException: # Just because the server closed the connection doesn't apparently mean # that the server didn't send a response. 
pass try: response = conn.getresponse() except (socket.error, httplib.HTTPException): if i == 0: conn.close() conn.connect() continue else: raise else: content = "" if method == "HEAD": response.close() else: content = response.read() response = Response(response) if method != "HEAD": content = _decompressContent(response, content) break return (response, content) def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey): """Do the actual request using the connection object and also follow one level of redirects if necessary""" auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)] auth = auths and sorted(auths)[0][1] or None if auth: auth.request(method, request_uri, headers, body) (response, content) = self._conn_request(conn, request_uri, method, body, headers) if auth: if auth.response(response, body): auth.request(method, request_uri, headers, body) (response, content) = self._conn_request(conn, request_uri, method, body, headers ) response._stale_digest = 1 if response.status == 401: for authorization in self._auth_from_challenge(host, request_uri, headers, response, content): authorization.request(method, request_uri, headers, body) (response, content) = self._conn_request(conn, request_uri, method, body, headers, ) if response.status != 401: self.authorizations.append(authorization) authorization.response(response, body) break if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303): if self.follow_redirects and response.status in [300, 301, 302, 303, 307]: # Pick out the location header and basically start from the beginning # remembering first to strip the ETag header and decrement our 'depth' if redirections: if not response.has_key('location') and response.status != 300: raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content) # Fix-up relative redirects (which 
violate an RFC 2616 MUST) if response.has_key('location'): location = response['location'] (scheme, authority, path, query, fragment) = parse_uri(location) if authority is None: response['location'] = urlparse.urljoin(absolute_uri, location) if response.status == 301 and method in ["GET", "HEAD"]: response['-x-permanent-redirect-url'] = response['location'] if not response.has_key('content-location'): response['content-location'] = absolute_uri _updateCache(headers, response, content, self.cache, cachekey) if headers.has_key('if-none-match'): del headers['if-none-match'] if headers.has_key('if-modified-since'): del headers['if-modified-since'] if response.has_key('location'): location = response['location'] old_response = copy.deepcopy(response) if not old_response.has_key('content-location'): old_response['content-location'] = absolute_uri redirect_method = ((response.status == 303) and (method not in ["GET", "HEAD"])) and "GET" or method (response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1) response.previous = old_response else: raise RedirectLimit( _("Redirected more times than rediection_limit allows."), response, content) elif response.status in [200, 203] and method == "GET": # Don't cache 206's since we aren't going to handle byte range requests if not response.has_key('content-location'): response['content-location'] = absolute_uri _updateCache(headers, response, content, self.cache, cachekey) return (response, content) def _normalize_headers(self, headers): return _normalize_headers(headers) # Need to catch and rebrand some exceptions # Then need to optionally turn all exceptions into status codes # including all socket.* and httplib.* exceptions. def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None): """ Performs a single HTTP request. The 'uri' is the URI of the HTTP resource and can begin with either 'http' or 'https'. 
The value of 'uri' must be an absolute URI. The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc. There is no restriction on the methods allowed. The 'body' is the entity body to be sent with the request. It is a string object. Any extra headers that are to be sent with the request should be provided in the 'headers' dictionary. The maximum number of redirect to follow before raising an exception is 'redirections. The default is 5. The return value is a tuple of (response, content), the first being and instance of the 'Response' class, the second being a string that contains the response entity body. """ try: if headers is None: headers = {} else: headers = self._normalize_headers(headers) if not headers.has_key('user-agent'): headers['user-agent'] = "Python-httplib2/%s" % __version__ uri = iri2uri(uri) (scheme, authority, request_uri, defrag_uri) = urlnorm(uri) domain_port = authority.split(":")[0:2] if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http': scheme = 'https' authority = domain_port[0] conn_key = scheme+":"+authority if conn_key in self.connections: conn = self.connections[conn_key] else: if not connection_type: connection_type = (scheme == 'https') and HTTPSConnectionWithTimeout or HTTPConnectionWithTimeout certs = list(self.certificates.iter(authority)) if scheme == 'https' and certs: conn = self.connections[conn_key] = connection_type(authority, key_file=certs[0][0], cert_file=certs[0][1], timeout=self.timeout, proxy_info=self.proxy_info) else: conn = self.connections[conn_key] = connection_type(authority, timeout=self.timeout, proxy_info=self.proxy_info) conn.set_debuglevel(debuglevel) if method in ["GET", "HEAD"] and 'range' not in headers and 'accept-encoding' not in headers: headers['accept-encoding'] = 'gzip, deflate' info = email.Message.Message() cached_value = None if self.cache: cachekey = defrag_uri cached_value = self.cache.get(cachekey) if cached_value: # info = 
email.message_from_string(cached_value) # # Need to replace the line above with the kludge below # to fix the non-existent bug not fixed in this # bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html try: info, content = cached_value.split('\r\n\r\n', 1) feedparser = email.FeedParser.FeedParser() feedparser.feed(info) info = feedparser.close() feedparser._parse = None except IndexError, ValueError: self.cache.delete(cachekey) cachekey = None cached_value = None else: cachekey = None if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers: # http://www.w3.org/1999/04/Editing/ headers['if-match'] = info['etag'] if method not in ["GET", "HEAD"] and self.cache and cachekey: # RFC 2616 Section 13.10 self.cache.delete(cachekey) # Check the vary header in the cache to see if this request # matches what varies in the cache. if method in ['GET', 'HEAD'] and 'vary' in info: vary = info['vary'] vary_headers = vary.lower().replace(' ', '').split(',') for header in vary_headers: key = '-varied-%s' % header value = info[key] if headers.get(header, '') != value: cached_value = None break if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers: if info.has_key('-x-permanent-redirect-url'): # Should cached permanent redirects be counted in our redirection count? For now, yes. (response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1) response.previous = Response(info) response.previous.fromcache = True else: # Determine our course of action: # Is the cached entry fresh or stale? # Has the client requested a non-cached response? # # There seems to be three possible answers: # 1. [FRESH] Return the cache entry w/o doing a GET # 2. [STALE] Do the GET (but add in cache validators if available) # 3. 
[TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request entry_disposition = _entry_disposition(info, headers) if entry_disposition == "FRESH": if not cached_value: info['status'] = '504' content = "" response = Response(info) if cached_value: response.fromcache = True return (response, content) if entry_disposition == "STALE": if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers: headers['if-none-match'] = info['etag'] if info.has_key('last-modified') and not 'last-modified' in headers: headers['if-modified-since'] = info['last-modified'] elif entry_disposition == "TRANSPARENT": pass (response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey) if response.status == 304 and method == "GET": # Rewrite the cache entry with the new end-to-end headers # Take all headers that are in response # and overwrite their values in info. # unless they are hop-by-hop, or are listed in the connection header. 
for key in _get_end2end_headers(response): info[key] = response[key] merged_response = Response(info) if hasattr(response, "_stale_digest"): merged_response._stale_digest = response._stale_digest _updateCache(headers, merged_response, content, self.cache, cachekey) response = merged_response response.status = 200 response.fromcache = True elif response.status == 200: content = new_content else: self.cache.delete(cachekey) content = new_content else: cc = _parse_cache_control(headers) if cc.has_key('only-if-cached'): info['status'] = '504' response = Response(info) content = "" else: (response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey) except Exception, e: if self.force_exception_to_status_code: if isinstance(e, HttpLib2ErrorWithResponse): response = e.response content = e.content response.status = 500 response.reason = str(e) elif isinstance(e, socket.timeout) or (isinstance(e, socket.error) and 'timed out' in str(e)): content = "Request Timeout" response = Response( { "content-type": "text/plain", "status": "408", "content-length": len(content) }) response.reason = "Request Timeout" else: content = str(e) response = Response( { "content-type": "text/plain", "status": "400", "content-length": len(content) }) response.reason = "Bad Request" else: raise return (response, content) class Response(dict): """An object more like email.Message than httplib.HTTPResponse.""" """Is this response from our local cache""" fromcache = False """HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """ version = 11 "Status code returned by server. " status = 200 """Reason phrase returned by server.""" reason = "Ok" previous = None def __init__(self, info): # info is either an email.Message or # an httplib.HTTPResponse object. 
if isinstance(info, httplib.HTTPResponse): for key, value in info.getheaders(): self[key.lower()] = value self.status = info.status self['status'] = str(self.status) self.reason = info.reason self.version = info.version elif isinstance(info, email.Message.Message): for key, value in info.items(): self[key] = value self.status = int(self['status']) else: for key, value in info.iteritems(): self[key] = value self.status = int(self.get('status', self.status)) def __getattr__(self, name): if name == 'dict': return self else: raise AttributeError, name
boegel/easybuild-framework
refs/heads/master
easybuild/toolchains/compiler/cuda.py
1
## # Copyright 2013-2020 Ghent University # # This file is part of EasyBuild, # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), # with support of Ghent University (http://ugent.be/hpc), # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), # Flemish Research Foundation (FWO) (http://www.fwo.be/en) # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). # # https://github.com/easybuilders/easybuild # # EasyBuild is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation v2. # # EasyBuild is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. ## """ Support for CUDA compilers as toolchain (co-)compiler. 
:author: Kenneth Hoste (Ghent University) """ from easybuild.tools.toolchain.compiler import Compiler TC_CONSTANT_CUDA = "CUDA" class Cuda(Compiler): """CUDA compiler class.""" COMPILER_CUDA_MODULE_NAME = ['CUDA'] COMPILER_CUDA_FAMILY = TC_CONSTANT_CUDA COMPILER_CUDA_UNIQUE_OPTS = { # handle '-gencode arch=X,code=Y' nvcc options (also -arch, -code) # -arch always needs to be specified, -code is optional (defaults to -arch if missing) # -gencode is syntactic sugar for combining -arch/-code # multiple values can be specified # examples: # * target v1.3 features, generate both object code and PTX for v1.3: # -gencode arch=compute_13,code=compute_13 -gencode arch=compute_13,code=sm_13 # * target v3.5 features, only generate object code for v3.5: # -gencode arch=compute_35,code=sm_35 # * target v2.0 features, generate object code for v2.0 and v3.5: # -gencode arch=compute_20,code=sm_20 -gencode arch=compute_20,code=sm_35 'cuda_gencode': ([], ("List of arguments for nvcc -gencode command line option, e.g., " "['arch=compute_20,code=sm_20', 'arch=compute_35,code=compute_35']")), } # always C++ compiler command, even for C! 
COMPILER_CUDA_UNIQUE_OPTION_MAP = { '_opt_CUDA_CC': 'ccbin="%(CXX_base)s"', '_opt_CUDA_CXX': 'ccbin="%(CXX_base)s"', } COMPILER_CUDA_CC = 'nvcc' COMPILER_CUDA_CXX = 'nvcc' LIB_CUDA_RUNTIME = ['rt', 'cudart'] def __init__(self, *args, **kwargs): """Constructor, with settings custom to CUDA.""" super(Cuda, self).__init__(*args, **kwargs) # append CUDA prefix to list of compiler prefixes self.prefixes.append(TC_CONSTANT_CUDA) def _set_compiler_vars(self): """Set the compiler variables""" # append lib dir paths to LDFLAGS (only if the paths are actually there) root = self.get_software_root('CUDA')[0] self.variables.append_subdirs("LDFLAGS", root, subdirs=["lib64", "lib"]) super(Cuda, self)._set_compiler_vars() def _set_compiler_flags(self): """Collect flags to set, and add them as variables.""" super(Cuda, self)._set_compiler_flags() # always C++ compiler flags, even for C! # note: using $LIBS will yield the use of -lcudart in Xlinker, which is silly, but fine cuda_flags = [ 'Xcompiler="%s"' % str(self.variables['CXXFLAGS']), 'Xlinker="%s %s"' % (str(self.variables['LDFLAGS']), str(self.variables['LIBS'])), ] self.variables.nextend('CUDA_CFLAGS', cuda_flags) self.variables.nextend('CUDA_CXXFLAGS', cuda_flags) # add gencode compiler flags to list of flags for compiler variables for gencode_val in self.options.get('cuda_gencode', []): gencode_option = 'gencode %s' % gencode_val self.variables.nappend('CUDA_CFLAGS', gencode_option) self.variables.nappend('CUDA_CXXFLAGS', gencode_option)
shivylp/xLisp
refs/heads/master
xLisp/evaluator.py
1
from environment import xLispEnvironment class xLispEvaluator(object): def __init__(self, env = None): if env is None: env = xLispEnvironment() self.env = env def evaluate(self, atom_generator): results = [] for atom in atom_generator: results.append(atom.eval(self.env)) return results[-1]
JesusMtnez/devexperto-challenge
refs/heads/master
jesusmtnez/python/koans/koans/about_strings.py
2
#!/usr/bin/env python # -*- coding: utf-8 -*- from runner.koan import * class AboutStrings(Koan): def test_double_quoted_strings_are_strings(self): string = "Hello, world." self.assertEqual(True, isinstance(string, str)) def test_single_quoted_strings_are_also_strings(self): string = 'Goodbye, world.' self.assertEqual(True, isinstance(string, str)) def test_triple_quote_strings_are_also_strings(self): string = """Howdy, world!""" self.assertEqual(True, isinstance(string, str)) def test_triple_single_quotes_work_too(self): string = '''Bonjour tout le monde!''' self.assertEqual(True, isinstance(string, str)) def test_raw_strings_are_also_strings(self): string = r"Konnichi wa, world!" self.assertEqual(True, isinstance(string, str)) def test_use_single_quotes_to_create_string_with_double_quotes(self): string = 'He said, "Go Away."' self.assertEqual("He said, \"Go Away.\"", string) def test_use_double_quotes_to_create_strings_with_single_quotes(self): string = "Don't" self.assertEqual('Don\'t', string) def test_use_backslash_for_escaping_quotes_in_strings(self): a = "He said, \"Don't\"" b = 'He said, "Don\'t"' self.assertEqual(True, (a == b)) def test_use_backslash_at_the_end_of_a_line_to_continue_onto_the_next_line(self): string = "It was the best of times,\n\ It was the worst of times." self.assertEqual(52, len(string)) def test_triple_quoted_strings_can_span_lines(self): string = """ Howdy, world! """ self.assertEqual(15, len(string)) def test_triple_quoted_strings_need_less_escaping(self): a = "Hello \"world\"." 
b = """Hello "world".""" self.assertEqual(True, (a == b)) def test_escaping_quotes_at_the_end_of_triple_quoted_string(self): string = """Hello "world\"""" self.assertEqual('Hello "world"', string) def test_plus_concatenates_strings(self): string = "Hello, " + "world" self.assertEqual('Hello, world', string) def test_adjacent_strings_are_concatenated_automatically(self): string = "Hello" ", " "world" self.assertEqual('Hello, world', string) def test_plus_will_not_modify_original_strings(self): hi = "Hello, " there = "world" string = hi + there self.assertEqual('Hello, ', hi) self.assertEqual('world', there) def test_plus_equals_will_append_to_end_of_string(self): hi = "Hello, " there = "world" hi += there self.assertEqual('Hello, world', hi) def test_plus_equals_also_leaves_original_string_unmodified(self): original = "Hello, " hi = original there = "world" hi += there self.assertEqual('Hello, ', original) def test_most_strings_interpret_escape_characters(self): string = "\n" self.assertEqual('\n', string) self.assertEqual("""\n""", string) self.assertEqual(1, len(string))
pycassa/pycassa
refs/heads/master
pycassa/marshal.py
1
""" Tools for marshalling and unmarshalling data stored in Cassandra. """ import uuid import struct import calendar from datetime import datetime from decimal import Decimal import pycassa.util as util _number_types = frozenset((int, long, float)) def make_packer(fmt_string): return struct.Struct(fmt_string) _bool_packer = make_packer('>B') _float_packer = make_packer('>f') _double_packer = make_packer('>d') _long_packer = make_packer('>q') _int_packer = make_packer('>i') _short_packer = make_packer('>H') _BASIC_TYPES = ('BytesType', 'LongType', 'IntegerType', 'UTF8Type', 'AsciiType', 'LexicalUUIDType', 'TimeUUIDType', 'CounterColumnType', 'FloatType', 'DoubleType', 'DateType', 'BooleanType', 'UUIDType', 'Int32Type', 'DecimalType', 'TimestampType') def extract_type_name(typestr): if typestr is None: return 'BytesType' if "DynamicCompositeType" in typestr: return _get_composite_name(typestr) if "CompositeType" in typestr: return _get_composite_name(typestr) if "ReversedType" in typestr: return _get_inner_type(typestr) index = typestr.rfind('.') if index != -1: typestr = typestr[index + 1:] if typestr not in _BASIC_TYPES: typestr = 'BytesType' return typestr def _get_inner_type(typestr): """ Given a str like 'org.apache...ReversedType(LongType)', return just 'LongType' """ first_paren = typestr.find('(') return typestr[first_paren + 1:-1] def _get_inner_types(typestr): """ Given a str like 'org.apache...CompositeType(LongType, DoubleType)', return a tuple of the inner types, like ('LongType', 'DoubleType') """ internal_str = _get_inner_type(typestr) return map(str.strip, internal_str.split(',')) def _get_composite_name(typestr): types = map(extract_type_name, _get_inner_types(typestr)) return "CompositeType(" + ", ".join(types) + ")" def _to_timestamp(v): # Expects Value to be either date or datetime try: converted = calendar.timegm(v.utctimetuple()) converted = converted * 1e3 + getattr(v, 'microsecond', 0) / 1e3 except AttributeError: # Ints and floats are valid 
timestamps too if type(v) not in _number_types: raise TypeError('DateType arguments must be a datetime or timestamp') converted = v * 1e3 return long(converted) def get_composite_packer(typestr=None, composite_type=None): assert (typestr or composite_type), "Must provide typestr or " + \ "CompositeType instance" if typestr: packers = map(packer_for, _get_inner_types(typestr)) elif composite_type: packers = [c.pack for c in composite_type.components] len_packer = _short_packer.pack def pack_composite(items, slice_start=None): last_index = len(items) - 1 s = '' for i, (item, packer) in enumerate(zip(items, packers)): eoc = '\x00' if isinstance(item, tuple): item, inclusive = item if inclusive: if slice_start: eoc = '\xff' elif slice_start is False: eoc = '\x01' else: if slice_start: eoc = '\x01' elif slice_start is False: eoc = '\xff' elif i == last_index: if slice_start: eoc = '\xff' elif slice_start is False: eoc = '\x01' packed = packer(item) s += ''.join((len_packer(len(packed)), packed, eoc)) return s return pack_composite def get_composite_unpacker(typestr=None, composite_type=None): assert (typestr or composite_type), "Must provide typestr or " + \ "CompositeType instance" if typestr: unpackers = map(unpacker_for, _get_inner_types(typestr)) elif composite_type: unpackers = [c.unpack for c in composite_type.components] len_unpacker = lambda v: _short_packer.unpack(v)[0] def unpack_composite(bytestr): # The composite format for each component is: # <len> <value> <eoc> # 2 bytes | ? 
bytes | 1 byte components = [] i = iter(unpackers) while bytestr: unpacker = i.next() length = len_unpacker(bytestr[:2]) components.append(unpacker(bytestr[2:2 + length])) bytestr = bytestr[3 + length:] return tuple(components) return unpack_composite def get_dynamic_composite_packer(typestr): cassandra_types = {} for inner_type in _get_inner_types(typestr): alias, cassandra_type = inner_type.split('=>') cassandra_types[alias] = cassandra_type len_packer = _short_packer.pack def pack_dynamic_composite(items, slice_start=None): last_index = len(items) - 1 s = '' i = 0 for (alias, item) in items: eoc = '\x00' if isinstance(alias, tuple): inclusive = item alias, item = alias if inclusive: if slice_start: eoc = '\xff' elif slice_start is False: eoc = '\x01' else: if slice_start: eoc = '\x01' elif slice_start is False: eoc = '\xff' elif i == last_index: if slice_start: eoc = '\xff' elif slice_start is False: eoc = '\x01' if isinstance(alias, str) and len(alias) == 1: header = '\x80' + alias packer = packer_for(cassandra_types[alias]) else: cassandra_type = str(alias).split('(')[0] header = len_packer(len(cassandra_type)) + cassandra_type packer = packer_for(cassandra_type) i += 1 packed = packer(item) s += ''.join((header, len_packer(len(packed)), packed, eoc)) return s return pack_dynamic_composite def get_dynamic_composite_unpacker(typestr): cassandra_types = {} for inner_type in _get_inner_types(typestr): alias, cassandra_type = inner_type.split('=>') cassandra_types[alias] = cassandra_type len_unpacker = lambda v: _short_packer.unpack(v)[0] def unpack_dynamic_composite(bytestr): # The composite format for each component is: # <header> <len> <value> <eoc> # ? bytes | 2 bytes | ? 
bytes | 1 byte types = [] components = [] while bytestr: header = len_unpacker(bytestr[:2]) if header & 0x8000: alias = bytestr[1] types.append(alias) unpacker = unpacker_for(cassandra_types[alias]) bytestr = bytestr[2:] else: cassandra_type = bytestr[2:2 + header] types.append(cassandra_type) unpacker = unpacker_for(cassandra_type) bytestr = bytestr[2 + header:] length = len_unpacker(bytestr[:2]) components.append(unpacker(bytestr[2:2 + length])) bytestr = bytestr[3 + length:] return tuple(zip(types, components)) return unpack_dynamic_composite def packer_for(typestr): if typestr is None: return lambda v: v if "DynamicCompositeType" in typestr: return get_dynamic_composite_packer(typestr) if "CompositeType" in typestr: return get_composite_packer(typestr) if "ReversedType" in typestr: return packer_for(_get_inner_type(typestr)) data_type = extract_type_name(typestr) if data_type in ('DateType', 'TimestampType'): def pack_date(v, _=None): return _long_packer.pack(_to_timestamp(v)) return pack_date elif data_type == 'BooleanType': def pack_bool(v, _=None): return _bool_packer.pack(bool(v)) return pack_bool elif data_type == 'DoubleType': def pack_double(v, _=None): return _double_packer.pack(v) return pack_double elif data_type == 'FloatType': def pack_float(v, _=None): return _float_packer.pack(v) return pack_float elif data_type == 'DecimalType': def pack_decimal(dec, _=None): sign, digits, exponent = dec.as_tuple() unscaled = int(''.join(map(str, digits))) if sign: unscaled *= -1 scale = _int_packer.pack(-exponent) unscaled = encode_int(unscaled) return scale + unscaled return pack_decimal elif data_type == 'LongType': def pack_long(v, _=None): return _long_packer.pack(v) return pack_long elif data_type == 'Int32Type': def pack_int32(v, _=None): return _int_packer.pack(v) return pack_int32 elif data_type == 'IntegerType': return encode_int elif data_type == 'UTF8Type': def pack_utf8(v, _=None): try: return v.encode('utf-8') except UnicodeDecodeError: # v is 
already utf-8 encoded return v return pack_utf8 elif 'UUIDType' in data_type: def pack_uuid(value, slice_start=None): if slice_start is None: value = util.convert_time_to_uuid(value, randomize=True) else: value = util.convert_time_to_uuid(value, lowest_val=slice_start, randomize=False) if not hasattr(value, 'bytes'): raise TypeError("%s is not valid for UUIDType" % value) return value.bytes return pack_uuid elif data_type == "CounterColumnType": def noop(value, slice_start=None): return value return noop else: # data_type == 'BytesType' or something unknown def pack_bytes(v, _=None): if not isinstance(v, basestring): raise TypeError("A str or unicode value was expected, " + "but %s was received instead (%s)" % (v.__class__.__name__, str(v))) return v return pack_bytes def unpacker_for(typestr): if typestr is None: return lambda v: v if "DynamicCompositeType" in typestr: return get_dynamic_composite_unpacker(typestr) if "CompositeType" in typestr: return get_composite_unpacker(typestr) if "ReversedType" in typestr: return unpacker_for(_get_inner_type(typestr)) data_type = extract_type_name(typestr) if data_type == 'BytesType': return lambda v: v elif data_type in ('DateType', 'TimestampType'): return lambda v: datetime.utcfromtimestamp( _long_packer.unpack(v)[0] / 1e3) elif data_type == 'BooleanType': return lambda v: bool(_bool_packer.unpack(v)[0]) elif data_type == 'DoubleType': return lambda v: _double_packer.unpack(v)[0] elif data_type == 'FloatType': return lambda v: _float_packer.unpack(v)[0] elif data_type == 'DecimalType': def unpack_decimal(v): scale = _int_packer.unpack(v[:4])[0] unscaled = decode_int(v[4:]) return Decimal('%de%d' % (unscaled, -scale)) return unpack_decimal elif data_type == 'LongType': return lambda v: _long_packer.unpack(v)[0] elif data_type == 'Int32Type': return lambda v: _int_packer.unpack(v)[0] elif data_type == 'IntegerType': return decode_int elif data_type == 'UTF8Type': return lambda v: v.decode('utf-8') elif 'UUIDType' in 
data_type: return lambda v: uuid.UUID(bytes=v) else: return lambda v: v def encode_int(x, *args): if x >= 0: out = [] while x >= 256: out.append(struct.pack('B', 0xff & x)) x >>= 8 out.append(struct.pack('B', 0xff & x)) if x > 127: out.append('\x00') else: x = -1 - x out = [] while x >= 256: out.append(struct.pack('B', 0xff & ~x)) x >>= 8 if x <= 127: out.append(struct.pack('B', 0xff & ~x)) else: out.append(struct.pack('>H', 0xffff & ~x)) return ''.join(reversed(out)) def decode_int(term, *args): if term != "": val = int(term.encode('hex'), 16) if (ord(term[0]) & 128) != 0: val = val - (1 << (len(term) * 8)) return val
jrversteegh/softsailor
refs/heads/master
deps/numpy-1.6.1/numpy/testing/numpytest.py
40
import os
import sys
import traceback
import importlib

__all__ = ['IgnoreException', 'importall',]

DEBUG=0
get_frame = sys._getframe


class IgnoreException(Exception):
    "Ignoring this exception due to disabled feature"


def output_exception(printstream = sys.stdout):
    """Write a one-line summary of the active exception to *printstream*.

    Must be called from inside an ``except`` block (it reads
    ``sys.exc_info()``) and reports only the innermost traceback frame as
    ``file:line: ExcName: message (in function)``.
    """
    try:
        exc_type, value, tb = sys.exc_info()
        info = traceback.extract_tb(tb)
        # Deliberately terse; traceback.print_exc() would be more verbose.
        filename, lineno, function, text = info[-1] # last line only
        msg = "%s:%d: %s: %s (in %s)\n" % (
            filename, lineno, exc_type.__name__, str(value), function)
        printstream.write(msg)
    finally:
        # Break the reference cycle created by holding the traceback object.
        exc_type = value = tb = None
    return


def importall(package):
    """
    Try recursively to import all subpackages under package.

    *package* may be a module object or a dotted module name.  Subpackage
    directories without an ``__init__.py`` are skipped; import failures are
    reported to stdout and do not abort the walk.
    """
    if isinstance(package, str):
        package = __import__(package)

    package_name = package.__name__
    package_dir = os.path.dirname(package.__file__)
    for subpackage_name in os.listdir(package_dir):
        subdir = os.path.join(package_dir, subpackage_name)
        if not os.path.isdir(subdir):
            continue
        if not os.path.isfile(os.path.join(subdir, '__init__.py')):
            continue
        name = package_name + '.' + subpackage_name
        try:
            # importlib.import_module replaces the Python-2-only
            # ``exec 'import %s as m'`` statement and works on 2.7 and 3.x.
            m = importlib.import_module(name)
        except Exception as msg:
            print('Failed importing %s: %s' % (name, msg))
            continue
        importall(m)
    return
glovebx/odoo
refs/heads/8.0
addons/base_vat/base_vat.py
238
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2012 OpenERP SA (<http://openerp.com>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import logging import string import datetime import re _logger = logging.getLogger(__name__) try: import vatnumber except ImportError: _logger.warning("VAT validation partially unavailable because the `vatnumber` Python library cannot be found. 
" "Install it to support more countries, for example with `easy_install vatnumber`.") vatnumber = None from openerp.osv import fields, osv from openerp.tools.misc import ustr from openerp.tools.translate import _ _ref_vat = { 'at': 'ATU12345675', 'be': 'BE0477472701', 'bg': 'BG1234567892', 'ch': 'CHE-123.456.788 TVA or CH TVA 123456', #Swiss by Yannick Vaucher @ Camptocamp 'cy': 'CY12345678F', 'cz': 'CZ12345679', 'de': 'DE123456788', 'dk': 'DK12345674', 'ee': 'EE123456780', 'el': 'EL12345670', 'es': 'ESA12345674', 'fi': 'FI12345671', 'fr': 'FR32123456789', 'gb': 'GB123456782', 'gr': 'GR12345670', 'hu': 'HU12345676', 'hr': 'HR01234567896', # Croatia, contributed by Milan Tribuson 'ie': 'IE1234567FA', 'it': 'IT12345670017', 'lt': 'LT123456715', 'lu': 'LU12345613', 'lv': 'LV41234567891', 'mt': 'MT12345634', 'mx': 'MXABC123456T1B', 'nl': 'NL123456782B90', 'no': 'NO123456785', 'pe': 'PER10254824220 or PED10254824220', 'pl': 'PL1234567883', 'pt': 'PT123456789', 'ro': 'RO1234567897', 'se': 'SE123456789701', 'si': 'SI12345679', 'sk': 'SK0012345675', 'tr': 'TR1234567890 (VERGINO) veya TR12345678901 (TCKIMLIKNO)' # Levent Karakas @ Eska Yazilim A.S. } class res_partner(osv.osv): _inherit = 'res.partner' def _split_vat(self, vat): vat_country, vat_number = vat[:2].lower(), vat[2:].replace(' ', '') return vat_country, vat_number def simple_vat_check(self, cr, uid, country_code, vat_number, context=None): ''' Check the VAT number depending of the country. http://sima-pc.com/nif.php ''' if not ustr(country_code).encode('utf-8').isalpha(): return False check_func_name = 'check_vat_' + country_code check_func = getattr(self, check_func_name, None) or \ getattr(vatnumber, check_func_name, None) if not check_func: # No VAT validation available, default to check that the country code exists if country_code.upper() == 'EU': # Foreign companies that trade with non-enterprises in the EU # may have a VATIN starting with "EU" instead of a country code. 
return True res_country = self.pool.get('res.country') return bool(res_country.search(cr, uid, [('code', '=ilike', country_code)], context=context)) return check_func(vat_number) def vies_vat_check(self, cr, uid, country_code, vat_number, context=None): try: # Validate against VAT Information Exchange System (VIES) # see also http://ec.europa.eu/taxation_customs/vies/ return vatnumber.check_vies(country_code.upper()+vat_number) except Exception: # see http://ec.europa.eu/taxation_customs/vies/checkVatService.wsdl # Fault code may contain INVALID_INPUT, SERVICE_UNAVAILABLE, MS_UNAVAILABLE, # TIMEOUT or SERVER_BUSY. There is no way we can validate the input # with VIES if any of these arise, including the first one (it means invalid # country code or empty VAT number), so we fall back to the simple check. return self.simple_vat_check(cr, uid, country_code, vat_number, context=context) def button_check_vat(self, cr, uid, ids, context=None): if not self.check_vat(cr, uid, ids, context=context): msg = self._construct_constraint_msg(cr, uid, ids, context=context) raise osv.except_osv(_('Error!'), msg) return True def check_vat(self, cr, uid, ids, context=None): user_company = self.pool.get('res.users').browse(cr, uid, uid).company_id if user_company.vat_check_vies: # force full VIES online check check_func = self.vies_vat_check else: # quick and partial off-line checksum validation check_func = self.simple_vat_check for partner in self.browse(cr, uid, ids, context=context): if not partner.vat: continue vat_country, vat_number = self._split_vat(partner.vat) if not check_func(cr, uid, vat_country, vat_number, context=context): _logger.info(_("Importing VAT Number [%s] is not valid !" 
% vat_number)) return False return True def vat_change(self, cr, uid, ids, value, context=None): return {'value': {'vat_subjected': bool(value)}} def _commercial_fields(self, cr, uid, context=None): return super(res_partner, self)._commercial_fields(cr, uid, context=context) + ['vat_subjected'] def _construct_constraint_msg(self, cr, uid, ids, context=None): def default_vat_check(cn, vn): # by default, a VAT number is valid if: # it starts with 2 letters # has more than 3 characters return cn[0] in string.ascii_lowercase and cn[1] in string.ascii_lowercase vat_country, vat_number = self._split_vat(self.browse(cr, uid, ids)[0].vat) vat_no = "'CC##' (CC=Country Code, ##=VAT Number)" error_partner = self.browse(cr, uid, ids, context=context) if default_vat_check(vat_country, vat_number): vat_no = _ref_vat[vat_country] if vat_country in _ref_vat else vat_no if self.pool['res.users'].browse(cr, uid, uid).company_id.vat_check_vies: return '\n' + _('The VAT number [%s] for partner [%s] either failed the VIES VAT validation check or did not respect the expected format %s.') % (error_partner[0].vat, error_partner[0].name, vat_no) return '\n' + _('The VAT number [%s] for partner [%s] does not seem to be valid. \nNote: the expected format is %s') % (error_partner[0].vat, error_partner[0].name, vat_no) _constraints = [(check_vat, _construct_constraint_msg, ["vat"])] __check_vat_ch_re1 = re.compile(r'(MWST|TVA|IVA)[0-9]{6}$') __check_vat_ch_re2 = re.compile(r'E([0-9]{9}|-[0-9]{3}\.[0-9]{3}\.[0-9]{3})(MWST|TVA|IVA)$') def check_vat_ch(self, vat): ''' Check Switzerland VAT number. 
''' # VAT number in Switzerland will change between 2011 and 2013 # http://www.estv.admin.ch/mwst/themen/00154/00589/01107/index.html?lang=fr # Old format is "TVA 123456" we will admit the user has to enter ch before the number # Format will becomes such as "CHE-999.999.99C TVA" # Both old and new format will be accepted till end of 2013 # Accepted format are: (spaces are ignored) # CH TVA ###### # CH IVA ###### # CH MWST ####### # # CHE#########MWST # CHE#########TVA # CHE#########IVA # CHE-###.###.### MWST # CHE-###.###.### TVA # CHE-###.###.### IVA # if self.__check_vat_ch_re1.match(vat): return True match = self.__check_vat_ch_re2.match(vat) if match: # For new TVA numbers, do a mod11 check num = filter(lambda s: s.isdigit(), match.group(1)) # get the digits only factor = (5,4,3,2,7,6,5,4) csum = sum([int(num[i]) * factor[i] for i in range(8)]) check = (11 - (csum % 11)) % 11 return check == int(num[8]) return False def _ie_check_char(self, vat): vat = vat.zfill(8) extra = 0 if vat[7] not in ' W': if vat[7].isalpha(): extra = 9 * (ord(vat[7]) - 64) else: # invalid return -1 checksum = extra + sum((8-i) * int(x) for i, x in enumerate(vat[:7])) return 'WABCDEFGHIJKLMNOPQRSTUV'[checksum % 23] def check_vat_ie(self, vat): """ Temporary Ireland VAT validation to support the new format introduced in January 2013 in Ireland, until upstream is fixed. 
TODO: remove when fixed upstream""" if len(vat) not in (8, 9) or not vat[2:7].isdigit(): return False if len(vat) == 8: # Normalize pre-2013 numbers: final space or 'W' not significant vat += ' ' if vat[:7].isdigit(): return vat[7] == self._ie_check_char(vat[:7] + vat[8]) elif vat[1] in (string.ascii_uppercase + '+*'): # Deprecated format # See http://www.revenue.ie/en/online/third-party-reporting/reporting-payment-details/faqs.html#section3 return vat[7] == self._ie_check_char(vat[2:7] + vat[0] + vat[8]) return False # Mexican VAT verification, contributed by Vauxoo # and Panos Christeas <p_christ@hol.gr> __check_vat_mx_re = re.compile(r"(?P<primeras>[A-Za-z\xd1\xf1&]{3,4})" \ r"[ \-_]?" \ r"(?P<ano>[0-9]{2})(?P<mes>[01][0-9])(?P<dia>[0-3][0-9])" \ r"[ \-_]?" \ r"(?P<code>[A-Za-z0-9&\xd1\xf1]{3})$") def check_vat_mx(self, vat): ''' Mexican VAT verification Verificar RFC México ''' # we convert to 8-bit encoding, to help the regex parse only bytes vat = ustr(vat).encode('iso8859-1') m = self.__check_vat_mx_re.match(vat) if not m: #No valid format return False try: ano = int(m.group('ano')) if ano > 30: ano = 1900 + ano else: ano = 2000 + ano datetime.date(ano, int(m.group('mes')), int(m.group('dia'))) except ValueError: return False #Valid format and valid date return True # Norway VAT validation, contributed by Rolv Råen (adEgo) <rora@adego.no> def check_vat_no(self, vat): ''' Check Norway VAT number.See http://www.brreg.no/english/coordination/number.html ''' if len(vat) != 9: return False try: int(vat) except ValueError: return False sum = (3 * int(vat[0])) + (2 * int(vat[1])) + \ (7 * int(vat[2])) + (6 * int(vat[3])) + \ (5 * int(vat[4])) + (4 * int(vat[5])) + \ (3 * int(vat[6])) + (2 * int(vat[7])) check = 11 -(sum % 11) if check == 11: check = 0 if check == 10: # 10 is not a valid check digit for an organization number return False return check == int(vat[8]) # Peruvian VAT validation, contributed by Vauxoo def check_vat_pe(self, vat): vat_type,vat = vat and 
len(vat)>=2 and (vat[0], vat[1:]) or (False, False) if vat_type and vat_type.upper() == 'D': #DNI return True elif vat_type and vat_type.upper() == 'R': #verify RUC factor = '5432765432' sum = 0 dig_check = False if len(vat) != 11: return False try: int(vat) except ValueError: return False for f in range(0,10): sum += int(factor[f]) * int(vat[f]) subtraction = 11 - (sum % 11) if subtraction == 10: dig_check = 0 elif subtraction == 11: dig_check = 1 else: dig_check = subtraction return int(vat[10]) == dig_check else: return False # VAT validation in Turkey, contributed by # Levent Karakas @ Eska Yazilim A.S. def check_vat_tr(self, vat): if not (10 <= len(vat) <= 11): return False try: int(vat) except ValueError: return False # check vat number (vergi no) if len(vat) == 10: sum = 0 check = 0 for f in range(0,9): c1 = (int(vat[f]) + (9-f)) % 10 c2 = ( c1 * (2 ** (9-f)) ) % 9 if (c1 != 0) and (c2 == 0): c2 = 9 sum += c2 if sum % 10 == 0: check = 0 else: check = 10 - (sum % 10) return int(vat[9]) == check # check personal id (tc kimlik no) if len(vat) == 11: c1a = 0 c1b = 0 c2 = 0 for f in range(0,9,2): c1a += int(vat[f]) for f in range(1,9,2): c1b += int(vat[f]) c1 = ( (7 * c1a) - c1b) % 10 for f in range(0,10): c2 += int(vat[f]) c2 = c2 % 10 return int(vat[9]) == c1 and int(vat[10]) == c2 return False # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
duncan-r/SHIP
refs/heads/develop
ship/fmp/datunits/spillunit.py
1
""" Summary: Contains the SpillUnit class. This holds all of the data read in from the spill units in the dat file. Can be called to load in the data and read and update the contents held in the object. Author: Duncan R. Created: 01 Apr 2016 Copyright: Duncan Runnacles 2016 TODO: Updates: """ from __future__ import unicode_literals from ship.fmp.datunits.isisunit import AUnit from ship.fmp.datunits import ROW_DATA_TYPES as rdt from ship.datastructures import dataobject as do from ship.datastructures.rowdatacollection import RowDataCollection from ship.fmp.headdata import HeadDataItem from ship.datastructures import DATA_TYPES as dt import logging logger = logging.getLogger(__name__) """logging references with a __name__ set to this module.""" class SpillUnit (AUnit): """Concrete implementation of AUnit storing Isis Spill Unit data. Contains a reference to a rowdatacollection for storing and accessing all the row data. i.e. the geometry data for the section, containing the chainage, elevation, etc values. Methods for accessing the data in these objects and adding removing rows are available. See Also: AUnit """ UNIT_TYPE = 'spill' UNIT_CATEGORY = 'spill' FILE_KEY = 'SPILL' FILE_KEY2 = None def __init__(self, **kwargs): """Constructor. Args: fileOrder (int): The location of this unit in the file. 
""" super(SpillUnit, self).__init__(**kwargs) self._name = 'Spl' self._name_ds = 'SplDS' self.head_data = { 'comment': HeadDataItem('', '', 0, 1, dtype=dt.STRING), 'weir_coef': HeadDataItem(1.700, '{:>10}', 1, 0, dtype=dt.FLOAT, dps=3), 'modular_limit': HeadDataItem(0.700, '{:>10}', 1, 2, dtype=dt.FLOAT, dps=3), } self._unit_type = SpillUnit.UNIT_TYPE self._unit_category = SpillUnit.UNIT_CATEGORY dobjs = [ do.FloatData(rdt.CHAINAGE, format_str='{:>10}', no_of_dps=3, update_callback=self.checkIncreases), do.FloatData(rdt.ELEVATION, format_str='{:>10}', no_of_dps=3), do.FloatData(rdt.EASTING, format_str='{:>10}', no_of_dps=2, default=0.00), do.FloatData(rdt.NORTHING, format_str='{:>10}', no_of_dps=2, default=0.00), ] self.row_data['main'] = RowDataCollection.bulkInitCollection(dobjs) self.row_data['main'].setDummyRow({rdt.CHAINAGE: 0, rdt.ELEVATION: 0}) def icLabels(self): return [self._name, self._name_ds] def linkLabels(self): """Overriddes superclass method.""" return {'name': self.name, 'name_ds': self.name_ds} def readUnitData(self, unit_data, file_line): """Reads the unit data into the geometry objects. Args: unit_data (list): The part of the isis dat file pertaining to this section See Also: AUnit - readUnitData() """ file_line = self._readHeadData(unit_data, file_line) file_line = self._readRowData(unit_data, file_line) return file_line - 1 def _readHeadData(self, unit_data, file_line): """Reads the data in the file header section into the class. Args: unit_data (list): contains data for this unit. """ self.head_data['comment'].value = unit_data[file_line][5:].strip() self._name = unit_data[file_line + 1][:12].strip() self._name_ds = unit_data[file_line + 1][12:24].strip() self.head_data['weir_coef'].value = unit_data[file_line + 2][:10].strip() self.head_data['modular_limit'].value = unit_data[file_line + 2][10:20].strip() return file_line + 3 def _readRowData(self, unit_data, file_line): """Reads the units rows into the row collection. 
This is all the geometry data that occurs after the no of rows variable in the Spill Units of the dat file. Args: unit_data: the data pertaining to this unit. """ self.unit_length = int(unit_data[file_line].strip()) file_line += 1 out_line = file_line + self.unit_length try: # Load the geometry data for i in range(file_line, out_line): chain = unit_data[i][0:10].strip() elev = unit_data[i][10:20].strip() east = None north = None ''' In some edge cases there are no values set in the file for the easting and northing, so use defaults. this actually checks that they are both there, e starts at 21, n starts at 31 ''' if len(unit_data[i]) > 31: east = unit_data[i][20:30].strip() north = unit_data[i][30:40].strip() self.row_data['main'].addRow({ rdt.CHAINAGE: chain, rdt.ELEVATION: elev, rdt.EASTING: east, rdt.NORTHING: north }, no_copy=True) except NotImplementedError: logger.ERROR('Unable to read Unit Data(dataRowObject creation) - NotImplementedError') raise return out_line def getData(self): """Retrieve the data in this unit. The String[] returned is formatted for printing in the fashion of the .dat file. Returns: list of output data formated the same as in the .DAT file. """ num_rows = self.row_data['main'].numberOfRows() out_data = self._getHeadData(num_rows) out_data.extend(self._getRowData(num_rows)) return out_data def _getRowData(self, num_rows): """Get the data in the row collection. For all the rows in the spill geometry section get the data from the rowdatacollection class. Returns: list containing the formatted unit rows. """ out_data = [] for i in range(0, num_rows): out_data.append(self.row_data['main'].getPrintableRow(i)) return out_data def _getHeadData(self, num_rows): """Get the header data formatted for printing out. Returns: list - contining the formatted head data. 
""" out = [] out.append('SPILL ' + self.head_data['comment'].value) out.append('{:<12}'.format(self._name) + '{:<12}'.format(self._name_ds)) out.append(self.head_data['weir_coef'].format() + self.head_data['modular_limit'].format()) out.append('{:>10}'.format(num_rows)) return out # def addDataRow(self, chainage, elevation, index=None, easting = 0.00, northing = 0.00): def addRow(self, row_vals, rowdata_key='main', index=None, **kwargs): """Adds a new row to the spill unit. Ensures that certain requirements of the data rows, such as the chainage needing to increase for each row down are met, then call the addNewRow() method in the row_collection. Args: row_vals(Dict): keys must be datunits.ROW_DATA_TYPES with a legal value assigned for the DataType. Chainage and Elevation MUST be included. index=None(int): the row to insert into. The existing row at the given index will be moved up by one. Returns: False if the addNewRow() method is unsuccessful. Raises: IndexError: If the index does not exist. ValueError: If the given value is not accepted by the DataObjects. See Also: ADataObject and subclasses for information on the parameters. """ keys = row_vals.keys() if not rdt.CHAINAGE in keys or not rdt.ELEVATION in keys: raise AttributeError('row_vals must include CHAINAGE and ELEVATION.') # Call superclass method to add the new row AUnit.addRow(self, row_vals, index=index, **kwargs)
oberlin/django
refs/heads/master
tests/m2m_through_regress/tests.py
89
from __future__ import unicode_literals from django.contrib.auth.models import User from django.core import management from django.test import TestCase from django.utils.six import StringIO from .models import ( Car, CarDriver, Driver, Group, Membership, Person, UserMembership, ) class M2MThroughTestCase(TestCase): @classmethod def setUpTestData(cls): cls.bob = Person.objects.create(name="Bob") cls.jim = Person.objects.create(name="Jim") cls.rock = Group.objects.create(name="Rock") cls.roll = Group.objects.create(name="Roll") cls.frank = User.objects.create_user("frank", "frank@example.com", "password") cls.jane = User.objects.create_user("jane", "jane@example.com", "password") # normal intermediate model cls.bob_rock = Membership.objects.create(person=cls.bob, group=cls.rock) cls.bob_roll = Membership.objects.create(person=cls.bob, group=cls.roll, price=50) cls.jim_rock = Membership.objects.create(person=cls.jim, group=cls.rock, price=50) # intermediate model with custom id column cls.frank_rock = UserMembership.objects.create(user=cls.frank, group=cls.rock) cls.frank_roll = UserMembership.objects.create(user=cls.frank, group=cls.roll) cls.jane_rock = UserMembership.objects.create(user=cls.jane, group=cls.rock) def test_retrieve_reverse_m2m_items(self): self.assertQuerysetEqual( self.bob.group_set.all(), [ "<Group: Rock>", "<Group: Roll>", ], ordered=False ) def test_retrieve_forward_m2m_items(self): self.assertQuerysetEqual( self.roll.members.all(), [ "<Person: Bob>", ] ) def test_cannot_use_setattr_on_reverse_m2m_with_intermediary_model(self): self.assertRaises(AttributeError, setattr, self.bob, "group_set", []) def test_cannot_use_setattr_on_forward_m2m_with_intermediary_model(self): self.assertRaises(AttributeError, setattr, self.roll, "members", []) def test_cannot_use_create_on_m2m_with_intermediary_model(self): self.assertRaises(AttributeError, self.rock.members.create, name="Anne") def test_cannot_use_create_on_reverse_m2m_with_intermediary_model(self): 
self.assertRaises(AttributeError, self.bob.group_set.create, name="Funk") def test_retrieve_reverse_m2m_items_via_custom_id_intermediary(self): self.assertQuerysetEqual( self.frank.group_set.all(), [ "<Group: Rock>", "<Group: Roll>", ], ordered=False ) def test_retrieve_forward_m2m_items_via_custom_id_intermediary(self): self.assertQuerysetEqual( self.roll.user_members.all(), [ "<User: frank>", ] ) def test_join_trimming_forwards(self): "Check that we don't involve too many copies of the intermediate table when doing a join. Refs #8046, #8254" self.assertQuerysetEqual( self.rock.members.filter(membership__price=50), [ "<Person: Jim>", ] ) def test_join_trimming_reverse(self): self.assertQuerysetEqual( self.bob.group_set.filter(membership__price=50), [ "<Group: Roll>", ] ) class M2MThroughSerializationTestCase(TestCase): @classmethod def setUpTestData(cls): cls.bob = Person.objects.create(name="Bob") cls.roll = Group.objects.create(name="Roll") cls.bob_roll = Membership.objects.create(person=cls.bob, group=cls.roll) def test_serialization(self): "m2m-through models aren't serialized as m2m fields. 
Refs #8134" pks = {"p_pk": self.bob.pk, "g_pk": self.roll.pk, "m_pk": self.bob_roll.pk} out = StringIO() management.call_command("dumpdata", "m2m_through_regress", format="json", stdout=out) self.assertJSONEqual( out.getvalue().strip(), '[{"pk": %(m_pk)s, "model": "m2m_through_regress.membership", "fields": {"person": %(p_pk)s, "price": ' '100, "group": %(g_pk)s}}, {"pk": %(p_pk)s, "model": "m2m_through_regress.person", "fields": {"name": ' '"Bob"}}, {"pk": %(g_pk)s, "model": "m2m_through_regress.group", "fields": {"name": "Roll"}}]' % pks ) out = StringIO() management.call_command("dumpdata", "m2m_through_regress", format="xml", indent=2, stdout=out) self.assertXMLEqual(out.getvalue().strip(), """ <?xml version="1.0" encoding="utf-8"?> <django-objects version="1.0"> <object pk="%(m_pk)s" model="m2m_through_regress.membership"> <field to="m2m_through_regress.person" name="person" rel="ManyToOneRel">%(p_pk)s</field> <field to="m2m_through_regress.group" name="group" rel="ManyToOneRel">%(g_pk)s</field> <field type="IntegerField" name="price">100</field> </object> <object pk="%(p_pk)s" model="m2m_through_regress.person"> <field type="CharField" name="name">Bob</field> </object> <object pk="%(g_pk)s" model="m2m_through_regress.group"> <field type="CharField" name="name">Roll</field> </object> </django-objects> """.strip() % pks) class ToFieldThroughTests(TestCase): def setUp(self): self.car = Car.objects.create(make="Toyota") self.driver = Driver.objects.create(name="Ryan Briscoe") CarDriver.objects.create(car=self.car, driver=self.driver) # We are testing if wrong objects get deleted due to using wrong # field value in m2m queries. So, it is essential that the pk # numberings do not match. # Create one intentionally unused driver to mix up the autonumbering self.unused_driver = Driver.objects.create(name="Barney Gumble") # And two intentionally unused cars. 
self.unused_car1 = Car.objects.create(make="Trabant") self.unused_car2 = Car.objects.create(make="Wartburg") def test_to_field(self): self.assertQuerysetEqual( self.car.drivers.all(), ["<Driver: Ryan Briscoe>"] ) def test_to_field_reverse(self): self.assertQuerysetEqual( self.driver.car_set.all(), ["<Car: Toyota>"] ) def test_to_field_clear_reverse(self): self.driver.car_set.clear() self.assertQuerysetEqual( self.driver.car_set.all(), []) def test_to_field_clear(self): self.car.drivers.clear() self.assertQuerysetEqual( self.car.drivers.all(), []) # Low level tests for _add_items and _remove_items. We test these methods # because .add/.remove aren't available for m2m fields with through, but # through is the only way to set to_field currently. We do want to make # sure these methods are ready if the ability to use .add or .remove with # to_field relations is added some day. def test_add(self): self.assertQuerysetEqual( self.car.drivers.all(), ["<Driver: Ryan Briscoe>"] ) # Yikes - barney is going to drive... 
self.car.drivers._add_items('car', 'driver', self.unused_driver) self.assertQuerysetEqual( self.car.drivers.all(), ["<Driver: Barney Gumble>", "<Driver: Ryan Briscoe>"] ) def test_add_null(self): nullcar = Car.objects.create(make=None) with self.assertRaises(ValueError): nullcar.drivers._add_items('car', 'driver', self.unused_driver) def test_add_related_null(self): nulldriver = Driver.objects.create(name=None) with self.assertRaises(ValueError): self.car.drivers._add_items('car', 'driver', nulldriver) def test_add_reverse(self): car2 = Car.objects.create(make="Honda") self.assertQuerysetEqual( self.driver.car_set.all(), ["<Car: Toyota>"] ) self.driver.car_set._add_items('driver', 'car', car2) self.assertQuerysetEqual( self.driver.car_set.all(), ["<Car: Toyota>", "<Car: Honda>"], ordered=False ) def test_add_null_reverse(self): nullcar = Car.objects.create(make=None) with self.assertRaises(ValueError): self.driver.car_set._add_items('driver', 'car', nullcar) def test_add_null_reverse_related(self): nulldriver = Driver.objects.create(name=None) with self.assertRaises(ValueError): nulldriver.car_set._add_items('driver', 'car', self.car) def test_remove(self): self.assertQuerysetEqual( self.car.drivers.all(), ["<Driver: Ryan Briscoe>"] ) self.car.drivers._remove_items('car', 'driver', self.driver) self.assertQuerysetEqual( self.car.drivers.all(), []) def test_remove_reverse(self): self.assertQuerysetEqual( self.driver.car_set.all(), ["<Car: Toyota>"] ) self.driver.car_set._remove_items('driver', 'car', self.car) self.assertQuerysetEqual( self.driver.car_set.all(), []) class ThroughLoadDataTestCase(TestCase): fixtures = ["m2m_through"] def test_sequence_creation(self): """ Sequences on an m2m_through are created for the through model, not a phantom auto-generated m2m table (#11107). 
""" out = StringIO() management.call_command("dumpdata", "m2m_through_regress", format="json", stdout=out) self.assertJSONEqual( out.getvalue().strip(), '[{"pk": 1, "model": "m2m_through_regress.usermembership", "fields": {"price": 100, "group": 1, "user"' ': 1}}, {"pk": 1, "model": "m2m_through_regress.person", "fields": {"name": "Guido"}}, {"pk": 1, ' '"model": "m2m_through_regress.group", "fields": {"name": "Python Core Group"}}]' )
erichuang1994/tornado
refs/heads/master
maint/scripts/run_fixers.py
124
#!/usr/bin/env python
"""Run tornado's custom lib2to3 fixers over a source tree.

Usage is like 2to3:
    $ maint/scripts/run_fixers.py -wn --no-diffs tornado
"""

import sys

from lib2to3.main import main

if __name__ == "__main__":
    # Guard the entry point: the original called sys.exit(main(...)) at
    # module level, which would run the fixers (and exit the interpreter)
    # merely on import. ``main`` returns a status code for sys.exit.
    sys.exit(main("custom_fixers"))
rdblue/Impala
refs/heads/cdh5-trunk
thirdparty/hive-1.1.0-cdh5.5.0-SNAPSHOT/lib/py/fb303_scripts/__init__.py
214
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # __all__ = ['fb303_simple_mgmt']
helinb/Tickeys-linux
refs/heads/master
tickeys/kivy/uix/videoplayer.py
6
''' Video player ============ .. versionadded:: 1.2.0 The video player widget can be used to play video and let the user control the play/pausing, volume and position. The widget cannot be customized much because of the complex assembly of numerous base widgets. .. image:: images/videoplayer.jpg :align: center Annotations ----------- If you want to display text at a specific time and for a certain duration, consider annotations. An annotation file has a ".jsa" extension. The player will automatically load the associated annotation file if it exists. An annotation file is JSON-based, providing a list of label dictionary items. The key and value must match one of the :class:`VideoPlayerAnnotation` items. For example, here is a short version of a jsa file that you can find in `examples/widgets/softboy.jsa`:: [ {"start": 0, "duration": 2, "text": "This is an example of annotation"}, {"start": 2, "duration": 2, "bgcolor": [0.5, 0.2, 0.4, 0.5], "text": "You can change the background color"} ] For our softboy.avi example, the result will be: .. image:: images/videoplayer-annotation.jpg :align: center If you want to experiment with annotation files, test with:: python -m kivy.uix.videoplayer examples/widgets/softboy.avi Fullscreen ---------- The video player can play the video in fullscreen, if :attr:`VideoPlayer.allow_fullscreen` is activated by a double-tap on the video. By default, if the video is smaller than the Window, it will be not stretched. You can allow stretching by passing custom options to a :class:`VideoPlayer` instance:: player = VideoPlayer(source='myvideo.avi', state='play', options={'allow_stretch': True}) End-of-stream behavior ---------------------- You can specify what happens when the video has finished playing by passing an `eos` (end of stream) directive to the underlying :class:`~kivy.core.video.VideoBase` class. `eos` can be one of 'stop', 'pause' or 'loop' and defaults to 'stop'. 
For example, in order to loop the video:: player = VideoPlayer(source='myvideo.avi', state='play', options={'eos': 'loop'}) .. note:: The `eos` property of the VideoBase class is a string specifying the end-of-stream behavior. This property differs from the `eos` properties of the :class:`VideoPlayer` and :class:`~kivy.uix.video.Video` classes, whose `eos` property is simply a boolean indicating that the end of the file has been reached. ''' __all__ = ('VideoPlayer', 'VideoPlayerAnnotation') from json import load from os.path import exists from kivy.properties import ObjectProperty, StringProperty, BooleanProperty, \ NumericProperty, DictProperty, OptionProperty from kivy.animation import Animation from kivy.uix.gridlayout import GridLayout from kivy.uix.floatlayout import FloatLayout from kivy.uix.progressbar import ProgressBar from kivy.uix.label import Label from kivy.uix.video import Video from kivy.uix.video import Image from kivy.factory import Factory from kivy.logger import Logger from kivy.clock import Clock class VideoPlayerVolume(Image): video = ObjectProperty(None) def on_touch_down(self, touch): if not self.collide_point(*touch.pos): return False touch.grab(self) # save the current volume and delta to it touch.ud[self.uid] = [self.video.volume, 0] return True def on_touch_move(self, touch): if touch.grab_current is not self: return # calculate delta dy = abs(touch.y - touch.oy) if dy > 10: dy = min(dy - 10, 100) touch.ud[self.uid][1] = dy self.video.volume = dy / 100. return True def on_touch_up(self, touch): if touch.grab_current is not self: return touch.ungrab(self) dy = abs(touch.y - touch.oy) if dy < 10: if self.video.volume > 0: self.video.volume = 0 else: self.video.volume = 1. class VideoPlayerPlayPause(Image): video = ObjectProperty(None) def on_touch_down(self, touch): '''.. 
versionchanged:: 1.4.0''' if self.collide_point(*touch.pos): if self.video.state == 'play': self.video.state = 'pause' else: self.video.state = 'play' return True class VideoPlayerStop(Image): video = ObjectProperty(None) def on_touch_down(self, touch): if self.collide_point(*touch.pos): self.video.state = 'stop' self.video.position = 0 return True class VideoPlayerProgressBar(ProgressBar): video = ObjectProperty(None) seek = NumericProperty(None, allownone=True) alpha = NumericProperty(1.) def __init__(self, **kwargs): super(VideoPlayerProgressBar, self).__init__(**kwargs) self.bubble = Factory.Bubble(size=(50, 44)) self.bubble_label = Factory.Label(text='0:00') self.bubble.add_widget(self.bubble_label) self.add_widget(self.bubble) self.bind(pos=self._update_bubble, size=self._update_bubble, seek=self._update_bubble) def on_video(self, instance, value): self.video.bind(position=self._update_bubble, state=self._showhide_bubble) def on_touch_down(self, touch): if not self.collide_point(*touch.pos): return self._show_bubble() touch.grab(self) self._update_seek(touch.x) return True def on_touch_move(self, touch): if touch.grab_current is not self: return self._update_seek(touch.x) return True def on_touch_up(self, touch): if touch.grab_current is not self: return touch.ungrab(self) if self.seek: self.video.seek(self.seek) self.seek = None self._hide_bubble() return True def _update_seek(self, x): if self.width == 0: return x = max(self.x, min(self.right, x)) - self.x self.seek = x / float(self.width) def _show_bubble(self): self.alpha = 1 Animation.stop_all(self, 'alpha') def _hide_bubble(self): self.alpha = 1. 
Animation(alpha=0, d=4, t='in_out_expo').start(self) def on_alpha(self, instance, value): self.bubble.background_color = (1, 1, 1, value) self.bubble_label.color = (1, 1, 1, value) def _update_bubble(self, *l): seek = self.seek if self.seek is None: if self.video.duration == 0: seek = 0 else: seek = self.video.position / self.video.duration # convert to minutes:seconds d = self.video.duration * seek minutes = int(d / 60) seconds = int(d - (minutes * 60)) # fix bubble label & position self.bubble_label.text = '%d:%02d' % (minutes, seconds) self.bubble.center_x = self.x + seek * self.width self.bubble.y = self.top def _showhide_bubble(self, instance, value): if value == 'play': self._hide_bubble() else: self._show_bubble() class VideoPlayerPreview(FloatLayout): source = ObjectProperty(None) video = ObjectProperty(None) click_done = BooleanProperty(False) def on_touch_down(self, touch): if self.collide_point(*touch.pos) and not self.click_done: self.click_done = True self.video.state = 'play' return True class VideoPlayerAnnotation(Label): '''Annotation class used for creating annotation labels. Additional keys are available: * bgcolor: [r, g, b, a] - background color of the text box * bgsource: 'filename' - background image used for the background text box * border: (n, e, s, w) - border used for the background image ''' start = NumericProperty(0) '''Start time of the annotation. :attr:`start` is a :class:`~kivy.properties.NumericProperty` and defaults to 0. ''' duration = NumericProperty(1) '''Duration of the annotation. :attr:`duration` is a :class:`~kivy.properties.NumericProperty` and defaults to 1. ''' annotation = DictProperty({}) def on_annotation(self, instance, ann): for key, value in ann.items(): setattr(self, key, value) class VideoPlayer(GridLayout): '''VideoPlayer class. See module documentation for more information. ''' source = StringProperty('') '''Source of the video to read. 
:attr:`source` is a :class:`~kivy.properties.StringProperty` and defaults to ''. .. versionchanged:: 1.4.0 ''' thumbnail = StringProperty('') '''Thumbnail of the video to show. If None, VideoPlayer will try to find the thumbnail from the :attr:`source` + '.png'. :attr:`thumbnail` a :class:`~kivy.properties.StringProperty` and defaults to ''. .. versionchanged:: 1.4.0 ''' duration = NumericProperty(-1) '''Duration of the video. The duration defaults to -1 and is set to the real duration when the video is loaded. :attr:`duration` is a :class:`~kivy.properties.NumericProperty` and defaults to -1. ''' position = NumericProperty(0) '''Position of the video between 0 and :attr:`duration`. The position defaults to -1 and is set to the real position when the video is loaded. :attr:`position` is a :class:`~kivy.properties.NumericProperty` and defaults to -1. ''' volume = NumericProperty(1.0) '''Volume of the video in the range 0-1. 1 means full volume and 0 means mute. :attr:`volume` is a :class:`~kivy.properties.NumericProperty` and defaults to 1. ''' state = OptionProperty('stop', options=('play', 'pause', 'stop')) '''String, indicates whether to play, pause, or stop the video:: # start playing the video at creation video = VideoPlayer(source='movie.mkv', state='play') # create the video, and start later video = VideoPlayer(source='movie.mkv') # and later video.state = 'play' :attr:`state` is an :class:`~kivy.properties.OptionProperty` and defaults to 'play'. ''' play = BooleanProperty(False) ''' .. deprecated:: 1.4.0 Use :attr:`state` instead. Boolean, indicates whether the video is playing or not. You can start/stop the video by setting this property:: # start playing the video at creation video = VideoPlayer(source='movie.mkv', play=True) # create the video, and start later video = VideoPlayer(source='movie.mkv') # and later video.play = True :attr:`play` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. 
''' image_overlay_play = StringProperty( 'atlas://data/images/defaulttheme/player-play-overlay') '''Image filename used to show a "play" overlay when the video has not yet started. :attr:`image_overlay_play` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/player-play-overlay'. ''' image_loading = StringProperty('data/images/image-loading.gif') '''Image filename used when the video is loading. :attr:`image_loading` is a :class:`~kivy.properties.StringProperty` and defaults to 'data/images/image-loading.gif'. ''' image_play = StringProperty( 'atlas://data/images/defaulttheme/media-playback-start') '''Image filename used for the "Play" button. :attr:`image_play` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/media-playback-start'. ''' image_stop = StringProperty( 'atlas://data/images/defaulttheme/media-playback-stop') '''Image filename used for the "Stop" button. :attr:`image_stop` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/media-playback-stop'. ''' image_pause = StringProperty( 'atlas://data/images/defaulttheme/media-playback-pause') '''Image filename used for the "Pause" button. :attr:`image_pause` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/media-playback-pause'. ''' image_volumehigh = StringProperty( 'atlas://data/images/defaulttheme/audio-volume-high') '''Image filename used for the volume icon when the volume is high. :attr:`image_volumehigh` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/audio-volume-high'. ''' image_volumemedium = StringProperty( 'atlas://data/images/defaulttheme/audio-volume-medium') '''Image filename used for the volume icon when the volume is medium. :attr:`image_volumemedium` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/audio-volume-medium'. 
''' image_volumelow = StringProperty( 'atlas://data/images/defaulttheme/audio-volume-low') '''Image filename used for the volume icon when the volume is low. :attr:`image_volumelow` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/audio-volume-low'. ''' image_volumemuted = StringProperty( 'atlas://data/images/defaulttheme/audio-volume-muted') '''Image filename used for the volume icon when the volume is muted. :attr:`image_volumemuted` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/audio-volume-muted'. ''' annotations = StringProperty('') '''If set, it will be used for reading annotations box. :attr:`annotations` is a :class:`~kivy.properties.StringProperty` and defaults to ''. ''' fullscreen = BooleanProperty(False) '''Switch to fullscreen view. This should be used with care. When activated, the widget will remove itself from its parent, remove all children from the window and will add itself to it. When fullscreen is unset, all the previous children are restored and the widget is restored to its previous parent. .. warning:: The re-add operation doesn't care about the index position of it's children within the parent. :attr:`fullscreen` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. ''' allow_fullscreen = BooleanProperty(True) '''By default, you can double-tap on the video to make it fullscreen. Set this property to False to prevent this behavior. :attr:`allow_fullscreen` is a :class:`~kivy.properties.BooleanProperty` defaults to True. ''' options = DictProperty({}) '''Optional parameters can be passed to a :class:`~kivy.uix.video.Video` instance with this property. :attr:`options` a :class:`~kivy.properties.DictProperty` and defaults to {}. 
''' # internals container = ObjectProperty(None) def __init__(self, **kwargs): self._video = None self._image = None self._annotations = '' self._annotations_labels = [] super(VideoPlayer, self).__init__(**kwargs) self._load_thumbnail() self._load_annotations() if self.source: self._trigger_video_load() def _trigger_video_load(self, *largs): Clock.unschedule(self._do_video_load) Clock.schedule_once(self._do_video_load, -1) def on_source(self, instance, value): # we got a value, try to see if we have an image for it self._load_thumbnail() self._load_annotations() if self._video is not None: self._video.unload() self._video = None if value: self._trigger_video_load() def on_image_overlay_play(self, instance, value): self._image.image_overlay_play = value def on_image_loading(self, instance, value): self._image.image_loading = value def _load_thumbnail(self): if not self.container: return self.container.clear_widgets() # get the source, remove extension, and use png thumbnail = self.thumbnail if not thumbnail: filename = self.source.rsplit('.', 1) thumbnail = filename[0] + '.png' self._image = VideoPlayerPreview(source=thumbnail, video=self) self.container.add_widget(self._image) def _load_annotations(self): if not self.container: return self._annotations_labels = [] annotations = self.annotations if not annotations: filename = self.source.rsplit('.', 1) annotations = filename[0] + '.jsa' if exists(annotations): with open(annotations, 'r') as fd: self._annotations = load(fd) if self._annotations: for ann in self._annotations: self._annotations_labels.append( VideoPlayerAnnotation(annotation=ann)) def on_state(self, instance, value): if self._video is not None: self._video.state = value def _set_state(self, instance, value): self.state = value def _do_video_load(self, *largs): self._video = Video(source=self.source, state=self.state, volume=self.volume, pos_hint={'x': 0, 'y': 0}, **self.options) self._video.bind(texture=self._play_started, 
duration=self.setter('duration'), position=self.setter('position'), volume=self.setter('volume'), state=self._set_state) def on_play(self, instance, value): value = 'play' if value else 'stop' return self.on_state(instance, value) def on_volume(self, instance, value): if not self._video: return self._video.volume = value def on_position(self, instance, value): labels = self._annotations_labels if not labels: return for label in labels: start = label.start duration = label.duration if start > value or (start + duration) < value: if label.parent: label.parent.remove_widget(label) elif label.parent is None: self.container.add_widget(label) def seek(self, percent): '''Change the position to a percentage of the duration. Percentage must be a value between 0-1. .. warning:: Calling seek() before video is loaded has no effect. ''' if not self._video: return self._video.seek(percent) def _play_started(self, instance, value): self.container.clear_widgets() self.container.add_widget(self._video) def on_touch_down(self, touch): if not self.collide_point(*touch.pos): return False if touch.is_double_tap and self.allow_fullscreen: self.fullscreen = not self.fullscreen return True return super(VideoPlayer, self).on_touch_down(touch) def on_fullscreen(self, instance, value): window = self.get_parent_window() if not window: Logger.warning('VideoPlayer: Cannot switch to fullscreen, ' 'window not found.') if value: self.fullscreen = False return if not self.parent: Logger.warning('VideoPlayer: Cannot switch to fullscreen, ' 'no parent.') if value: self.fullscreen = False return if value: self._fullscreen_state = state = { 'parent': self.parent, 'pos': self.pos, 'size': self.size, 'pos_hint': self.pos_hint, 'size_hint': self.size_hint, 'window_children': window.children[:]} # remove all window children for child in window.children[:]: window.remove_widget(child) # put the video in fullscreen if state['parent'] is not window: state['parent'].remove_widget(self) window.add_widget(self) 
# ensure the video widget is in 0, 0, and the size will be # reajusted self.pos = (0, 0) self.size = (100, 100) self.pos_hint = {} self.size_hint = (1, 1) else: state = self._fullscreen_state window.remove_widget(self) for child in state['window_children']: window.add_widget(child) self.pos_hint = state['pos_hint'] self.size_hint = state['size_hint'] self.pos = state['pos'] self.size = state['size'] if state['parent'] is not window: state['parent'].add_widget(self) if __name__ == '__main__': import sys from kivy.base import runTouchApp player = VideoPlayer(source=sys.argv[1]) runTouchApp(player) if player: player.state = 'stop'
cedricbonhomme/shelter-database
refs/heads/master
shelter/scripts/export_shelters.py
1
#! /usr/bin/python # -*- coding:utf-8 -* import csv import os.path import sys from web.models import ( Section, Category, Attribute, Property, Value, Association, Shelter, ) from bootstrap import db def export_shelters(dump_file, truncate=""): if truncate == "overwrite": pass elif os.path.isfile(dump_file): print( "Export aborted: dump file already exists. Choose another filename or enable overwriting" ) sys.exit(1) else: pass Subcategory = db.aliased(Category) headersquery = ( db.session.query( Section.name.label("section"), Category.name.label("category"), Subcategory.name.label("subcategory"), Attribute.name.label("attribute"), ) .join(Category, Category.section_id == Section.id) .join(Subcategory, Subcategory.parent_id == Category.id) .join(Attribute, Attribute.category_id == Subcategory.id) ) dataquery = ( db.session.query( Property.attribute_id.label("att_id"), Property.shelter_id, Value.name.label("value"), ) .join(Association, Association.property_id == Property.id) .join(Value, Value.id == Association.value_id) .join(Attribute, Attribute.id == Property.attribute_id) .join(Shelter, Shelter.id == Property.shelter_id) .order_by(Property.shelter_id, Property.attribute_id) ) # print(dataquery) array = [] for index, row in enumerate(headersquery): section = row.section category = row.category # insert blanks instead of duplicate sections / categories if index != 0: if row.section == prev_row.section: section = "" if row.category == prev_row.category: category = "" array += [[section, category, row.subcategory, row.attribute]] prev_row = row columns_length = len(array) ###dump published shelters pubdataquery = dataquery.filter(Shelter.is_published == True) pubfilename = dump_file + "shelterdump_published.csv" with open(pubfilename, mode="wt", newline="", encoding="utf-8") as csvfile: shelters = csv.writer(csvfile, delimiter=",") # transpose array before write for cursor in zip(*array): shelters.writerow(cursor) r = [""] * columns_length for index, cursor in 
enumerate(pubdataquery): # remove any newline and carriage return if string if isinstance(cursor.value, str): value = cursor.value.replace("\n", " ").replace("\r", " ") else: value = cursor.value if index != 0: if cursor.shelter_id == prev_row.shelter_id: r[cursor.att_id - 1] = value else: shelters.writerow(r) r = [""] * columns_length r[cursor.att_id - 1] = value else: r[cursor.att_id - 1] = value prev_row = cursor shelters.writerow(r) ### dump unpublished shelters unpubdataquery = dataquery.filter(Shelter.is_published == False) unpubfilename = dump_file + "shelterdump_unpublished.csv" with open(unpubfilename, mode="wt", newline="", encoding="utf-8") as csvfile: shelters = csv.writer(csvfile, delimiter=",") # transpose array before write for cursor in zip(*array): shelters.writerow(cursor) r = [""] * columns_length for index, cursor in enumerate(unpubdataquery): # remove any newline and carriage return if string if isinstance(cursor.value, str): value = cursor.value.replace("\n", " ").replace("\r", " ") else: value = cursor.value if index != 0: if cursor.shelter_id == prev_row.shelter_id: r[cursor.att_id - 1] = cursor.value else: shelters.writerow(r) r = [""] * columns_length r[cursor.att_id - 1] = cursor.value else: r[cursor.att_id - 1] = cursor.value prev_row = cursor shelters.writerow(r)
mlalevic/pythontsp
refs/heads/master
web/application.py
33
""" Web application (from web.py) """ import webapi as web import webapi, wsgi, utils import debugerror import httpserver from utils import lstrips, safeunicode import sys import urllib import traceback import itertools import os import types from exceptions import SystemExit try: import wsgiref.handlers except ImportError: pass # don't break people with old Pythons __all__ = [ "application", "auto_application", "subdir_application", "subdomain_application", "loadhook", "unloadhook", "autodelegate" ] class application: """ Application to delegate requests based on path. >>> urls = ("/hello", "hello") >>> app = application(urls, globals()) >>> class hello: ... def GET(self): return "hello" >>> >>> app.request("/hello").data 'hello' """ def __init__(self, mapping=(), fvars={}, autoreload=None): if autoreload is None: autoreload = web.config.get('debug', False) self.init_mapping(mapping) self.fvars = fvars self.processors = [] self.add_processor(loadhook(self._load)) self.add_processor(unloadhook(self._unload)) if autoreload: def main_module_name(): mod = sys.modules['__main__'] file = getattr(mod, '__file__', None) # make sure this works even from python interpreter return file and os.path.splitext(os.path.basename(file))[0] def modname(fvars): """find name of the module name from fvars.""" file, name = fvars.get('__file__'), fvars.get('__name__') if file is None or name is None: return None if name == '__main__': # Since the __main__ module can't be reloaded, the module has # to be imported using its file name. 
name = main_module_name() return name mapping_name = utils.dictfind(fvars, mapping) module_name = modname(fvars) def reload_mapping(): """loadhook to reload mapping and fvars.""" mod = __import__(module_name, None, None, ['']) mapping = getattr(mod, mapping_name, None) if mapping: self.fvars = mod.__dict__ self.init_mapping(mapping) self.add_processor(loadhook(Reloader())) if mapping_name and module_name: self.add_processor(loadhook(reload_mapping)) # load __main__ module usings its filename, so that it can be reloaded. if main_module_name() and '__main__' in sys.argv: try: __import__(main_module_name()) except ImportError: pass def _load(self): web.ctx.app_stack.append(self) def _unload(self): web.ctx.app_stack = web.ctx.app_stack[:-1] if web.ctx.app_stack: # this is a sub-application, revert ctx to earlier state. oldctx = web.ctx.get('_oldctx') if oldctx: web.ctx.home = oldctx.home web.ctx.homepath = oldctx.homepath web.ctx.path = oldctx.path web.ctx.fullpath = oldctx.fullpath def _cleanup(self): # Threads can be recycled by WSGI servers. # Clearing up all thread-local state to avoid interefereing with subsequent requests. utils.ThreadedDict.clear_all() def init_mapping(self, mapping): self.mapping = list(utils.group(mapping, 2)) def add_mapping(self, pattern, classname): self.mapping.append((pattern, classname)) def add_processor(self, processor): """ Adds a processor to the application. >>> urls = ("/(.*)", "echo") >>> app = application(urls, globals()) >>> class echo: ... def GET(self, name): return name ... >>> >>> def hello(handler): return "hello, " + handler() ... >>> app.add_processor(hello) >>> app.request("/web.py").data 'hello, web.py' """ self.processors.append(processor) def request(self, localpart='/', method='GET', data=None, host="0.0.0.0:8080", headers=None, https=False, **kw): """Makes request to this application for the specified path and method. Response will be a storage object with data, status and headers. 
>>> urls = ("/hello", "hello") >>> app = application(urls, globals()) >>> class hello: ... def GET(self): ... web.header('Content-Type', 'text/plain') ... return "hello" ... >>> response = app.request("/hello") >>> response.data 'hello' >>> response.status '200 OK' >>> response.headers['Content-Type'] 'text/plain' To use https, use https=True. >>> urls = ("/redirect", "redirect") >>> app = application(urls, globals()) >>> class redirect: ... def GET(self): raise web.seeother("/foo") ... >>> response = app.request("/redirect") >>> response.headers['Location'] 'http://0.0.0.0:8080/foo' >>> response = app.request("/redirect", https=True) >>> response.headers['Location'] 'https://0.0.0.0:8080/foo' The headers argument specifies HTTP headers as a mapping object such as a dict. >>> urls = ('/ua', 'uaprinter') >>> class uaprinter: ... def GET(self): ... return 'your user-agent is ' + web.ctx.env['HTTP_USER_AGENT'] ... >>> app = application(urls, globals()) >>> app.request('/ua', headers = { ... 'User-Agent': 'a small jumping bean/1.0 (compatible)' ... 
}).data 'your user-agent is a small jumping bean/1.0 (compatible)' """ path, maybe_query = urllib.splitquery(localpart) query = maybe_query or "" if 'env' in kw: env = kw['env'] else: env = {} env = dict(env, HTTP_HOST=host, REQUEST_METHOD=method, PATH_INFO=path, QUERY_STRING=query, HTTPS=str(https)) headers = headers or {} for k, v in headers.items(): env['HTTP_' + k.upper().replace('-', '_')] = v if 'HTTP_CONTENT_LENGTH' in env: env['CONTENT_LENGTH'] = env.pop('HTTP_CONTENT_LENGTH') if 'HTTP_CONTENT_TYPE' in env: env['CONTENT_TYPE'] = env.pop('HTTP_CONTENT_TYPE') if method not in ["HEAD", "GET"]: data = data or '' import StringIO if isinstance(data, dict): q = urllib.urlencode(data) else: q = data env['wsgi.input'] = StringIO.StringIO(q) if not env.get('CONTENT_TYPE', '').lower().startswith('multipart/') and 'CONTENT_LENGTH' not in env: env['CONTENT_LENGTH'] = len(q) response = web.storage() def start_response(status, headers): response.status = status response.headers = dict(headers) response.header_items = headers response.data = "".join(self.wsgifunc()(env, start_response)) return response def browser(self): import browser return browser.AppBrowser(self) def handle(self): fn, args = self._match(self.mapping, web.ctx.path) return self._delegate(fn, self.fvars, args) def handle_with_processors(self): def process(processors): try: if processors: p, processors = processors[0], processors[1:] return p(lambda: process(processors)) else: return self.handle() except web.HTTPError: raise except (KeyboardInterrupt, SystemExit): raise except: print >> web.debug, traceback.format_exc() raise self.internalerror() # processors must be applied in the resvere order. (??) return process(self.processors) def wsgifunc(self, *middleware): """Returns a WSGI-compatible function for this application.""" def peep(iterator): """Peeps into an iterator by doing an iteration and returns an equivalent iterator. 
""" # wsgi requires the headers first # so we need to do an iteration # and save the result for later try: firstchunk = iterator.next() except StopIteration: firstchunk = '' return itertools.chain([firstchunk], iterator) def is_generator(x): return x and hasattr(x, 'next') def wsgi(env, start_resp): # clear threadlocal to avoid inteference of previous requests self._cleanup() self.load(env) try: # allow uppercase methods only if web.ctx.method.upper() != web.ctx.method: raise web.nomethod() result = self.handle_with_processors() if is_generator(result): result = peep(result) else: result = [result] except web.HTTPError, e: result = [e.data] result = web.safestr(iter(result)) status, headers = web.ctx.status, web.ctx.headers start_resp(status, headers) def cleanup(): self._cleanup() yield '' # force this function to be a generator return itertools.chain(result, cleanup()) for m in middleware: wsgi = m(wsgi) return wsgi def run(self, *middleware): """ Starts handling requests. If called in a CGI or FastCGI context, it will follow that protocol. If called from the command line, it will start an HTTP server on the port named in the first command line argument, or, if there is no argument, on port 8080. `middleware` is a list of WSGI middleware which is applied to the resulting WSGI function. """ return wsgi.runwsgi(self.wsgifunc(*middleware)) def stop(self): """Stops the http server started by run. """ if httpserver.server: httpserver.server.stop() httpserver.server = None def cgirun(self, *middleware): """ Return a CGI handler. This is mostly useful with Google App Engine. 
There you can just do: main = app.cgirun() """ wsgiapp = self.wsgifunc(*middleware) try: from google.appengine.ext.webapp.util import run_wsgi_app return run_wsgi_app(wsgiapp) except ImportError: # we're not running from within Google App Engine return wsgiref.handlers.CGIHandler().run(wsgiapp) def load(self, env): """Initializes ctx using env.""" ctx = web.ctx ctx.clear() ctx.status = '200 OK' ctx.headers = [] ctx.output = '' ctx.environ = ctx.env = env ctx.host = env.get('HTTP_HOST') if env.get('wsgi.url_scheme') in ['http', 'https']: ctx.protocol = env['wsgi.url_scheme'] elif env.get('HTTPS', '').lower() in ['on', 'true', '1']: ctx.protocol = 'https' else: ctx.protocol = 'http' ctx.homedomain = ctx.protocol + '://' + env.get('HTTP_HOST', '[unknown]') ctx.homepath = os.environ.get('REAL_SCRIPT_NAME', env.get('SCRIPT_NAME', '')) ctx.home = ctx.homedomain + ctx.homepath #@@ home is changed when the request is handled to a sub-application. #@@ but the real home is required for doing absolute redirects. ctx.realhome = ctx.home ctx.ip = env.get('REMOTE_ADDR') ctx.method = env.get('REQUEST_METHOD') ctx.path = env.get('PATH_INFO') # http://trac.lighttpd.net/trac/ticket/406 requires: if env.get('SERVER_SOFTWARE', '').startswith('lighttpd/'): ctx.path = lstrips(env.get('REQUEST_URI').split('?')[0], ctx.homepath) # Apache and CherryPy webservers unquote the url but lighttpd doesn't. # unquote explicitly for lighttpd to make ctx.path uniform across all servers. ctx.path = urllib.unquote(ctx.path) if env.get('QUERY_STRING'): ctx.query = '?' + env.get('QUERY_STRING', '') else: ctx.query = '' ctx.fullpath = ctx.path + ctx.query for k, v in ctx.iteritems(): # convert all string values to unicode values and replace # malformed data with a suitable replacement marker. 
if isinstance(v, str): ctx[k] = v.decode('utf-8', 'replace') # status must always be str ctx.status = '200 OK' ctx.app_stack = [] def _delegate(self, f, fvars, args=[]): def handle_class(cls): meth = web.ctx.method if meth == 'HEAD' and not hasattr(cls, meth): meth = 'GET' if not hasattr(cls, meth): raise web.nomethod(cls) tocall = getattr(cls(), meth) return tocall(*args) def is_class(o): return isinstance(o, (types.ClassType, type)) if f is None: raise web.notfound() elif isinstance(f, application): return f.handle_with_processors() elif is_class(f): return handle_class(f) elif isinstance(f, basestring): if f.startswith('redirect '): url = f.split(' ', 1)[1] if web.ctx.method == "GET": x = web.ctx.env.get('QUERY_STRING', '') if x: url += '?' + x raise web.redirect(url) elif '.' in f: mod, cls = f.rsplit('.', 1) mod = __import__(mod, None, None, ['']) cls = getattr(mod, cls) else: cls = fvars[f] return handle_class(cls) elif hasattr(f, '__call__'): return f() else: return web.notfound() def _match(self, mapping, value): for pat, what in mapping: if isinstance(what, application): if value.startswith(pat): f = lambda: self._delegate_sub_application(pat, what) return f, None else: continue elif isinstance(what, basestring): what, result = utils.re_subm('^' + pat + '$', what, value) else: result = utils.re_compile('^' + pat + '$').match(value) if result: # it's a match return what, [x for x in result.groups()] return None, None def _delegate_sub_application(self, dir, app): """Deletes request to sub application `app` rooted at the directory `dir`. The home, homepath, path and fullpath values in web.ctx are updated to mimic request to the subapp and are restored after it is handled. @@Any issues with when used with yield? 
""" web.ctx._oldctx = web.storage(web.ctx) web.ctx.home += dir web.ctx.homepath += dir web.ctx.path = web.ctx.path[len(dir):] web.ctx.fullpath = web.ctx.fullpath[len(dir):] return app.handle_with_processors() def get_parent_app(self): if self in web.ctx.app_stack: index = web.ctx.app_stack.index(self) if index > 0: return web.ctx.app_stack[index-1] def notfound(self): """Returns HTTPError with '404 not found' message""" parent = self.get_parent_app() if parent: return parent.notfound() else: return web._NotFound() def internalerror(self): """Returns HTTPError with '500 internal error' message""" parent = self.get_parent_app() if parent: return parent.internalerror() elif web.config.get('debug'): import debugerror return debugerror.debugerror() else: return web._InternalError() class auto_application(application): """Application similar to `application` but urls are constructed automatiacally using metaclass. >>> app = auto_application() >>> class hello(app.page): ... def GET(self): return "hello, world" ... >>> class foo(app.page): ... path = '/foo/.*' ... def GET(self): return "foo" >>> app.request("/hello").data 'hello, world' >>> app.request('/foo/bar').data 'foo' """ def __init__(self): application.__init__(self) class metapage(type): def __init__(klass, name, bases, attrs): type.__init__(klass, name, bases, attrs) path = attrs.get('path', '/' + name) # path can be specified as None to ignore that class # typically required to create a abstract base class. if path is not None: self.add_mapping(path, klass) class page: path = None __metaclass__ = metapage self.page = page # The application class already has the required functionality of subdir_application subdir_application = application class subdomain_application(application): """ Application to delegate requests based on the host. >>> urls = ("/hello", "hello") >>> app = application(urls, globals()) >>> class hello: ... 
def GET(self): return "hello" >>> >>> mapping = (r"hello\.example\.com", app) >>> app2 = subdomain_application(mapping) >>> app2.request("/hello", host="hello.example.com").data 'hello' >>> response = app2.request("/hello", host="something.example.com") >>> response.status '404 Not Found' >>> response.data 'not found' """ def handle(self): host = web.ctx.host.split(':')[0] #strip port fn, args = self._match(self.mapping, host) return self._delegate(fn, self.fvars, args) def _match(self, mapping, value): for pat, what in mapping: if isinstance(what, basestring): what, result = utils.re_subm('^' + pat + '$', what, value) else: result = utils.re_compile('^' + pat + '$').match(value) if result: # it's a match return what, [x for x in result.groups()] return None, None def loadhook(h): """ Converts a load hook into an application processor. >>> app = auto_application() >>> def f(): "something done before handling request" ... >>> app.add_processor(loadhook(f)) """ def processor(handler): h() return handler() return processor def unloadhook(h): """ Converts an unload hook into an application processor. >>> app = auto_application() >>> def f(): "something done after handling request" ... >>> app.add_processor(unloadhook(f)) """ def processor(handler): try: result = handler() is_generator = result and hasattr(result, 'next') except: # run the hook even when handler raises some exception h() raise if is_generator: return wrap(result) else: h() return result def wrap(result): def next(): try: return result.next() except: # call the hook at the and of iterator h() raise result = iter(result) while True: yield next() return processor def autodelegate(prefix=''): """ Returns a method that takes one argument and calls the method named prefix+arg, calling `notfound()` if there isn't one. 
Example: urls = ('/prefs/(.*)', 'prefs') class prefs: GET = autodelegate('GET_') def GET_password(self): pass def GET_privacy(self): pass `GET_password` would get called for `/prefs/password` while `GET_privacy` for `GET_privacy` gets called for `/prefs/privacy`. If a user visits `/prefs/password/change` then `GET_password(self, '/change')` is called. """ def internal(self, arg): if '/' in arg: first, rest = arg.split('/', 1) func = prefix + first args = ['/' + rest] else: func = prefix + arg args = [] if hasattr(self, func): try: return getattr(self, func)(*args) except TypeError: raise web.notfound() else: raise web.notfound() return internal class Reloader: """Checks to see if any loaded modules have changed on disk and, if so, reloads them. """ """File suffix of compiled modules.""" if sys.platform.startswith('java'): SUFFIX = '$py.class' else: SUFFIX = '.pyc' def __init__(self): self.mtimes = {} def __call__(self): for mod in sys.modules.values(): self.check(mod) def check(self, mod): # jython registers java packages as modules but they either # don't have a __file__ attribute or its value is None if not (mod and hasattr(mod, '__file__') and mod.__file__): return try: mtime = os.stat(mod.__file__).st_mtime except (OSError, IOError): return if mod.__file__.endswith(self.__class__.SUFFIX) and os.path.exists(mod.__file__[:-1]): mtime = max(os.stat(mod.__file__[:-1]).st_mtime, mtime) if mod not in self.mtimes: self.mtimes[mod] = mtime elif self.mtimes[mod] < mtime: try: reload(mod) self.mtimes[mod] = mtime except ImportError: pass if __name__ == "__main__": import doctest doctest.testmod()
jaredks/rumps
refs/heads/master
examples/example_simple.py
2
import rumps import time rumps.debug_mode(True) # turn on command line logging information for development - default is off @rumps.clicked("About") def about(sender): sender.title = 'NOM' if sender.title == 'About' else 'About' # can adjust titles of menu items dynamically rumps.alert("This is a cool app!") @rumps.clicked("Arbitrary", "Depth", "It's pretty easy") # very simple to access nested menu items def does_something(sender): my_data = {'poop': 88} rumps.notification(title='Hi', subtitle='There.', message='Friend!', sound=does_something.sound, data=my_data) does_something.sound = True @rumps.clicked("Preferences") def not_actually_prefs(sender): if not sender.icon: sender.icon = 'level_4.png' sender.state = not sender.state does_something.sound = not does_something.sound @rumps.timer(4) # create a new thread that calls the decorated function every 4 seconds def write_unix_time(sender): with app.open('times', 'a') as f: # this opens files in your app's Application Support folder f.write('The unix time now: {}\n'.format(time.time())) @rumps.clicked("Arbitrary") def change_statusbar_title(sender): app.title = 'Hello World' if app.title != 'Hello World' else 'World, Hello' @rumps.notifications def notifications(notification): # function that reacts to incoming notification dicts print(notification) def onebitcallback(sender): # functions don't have to be decorated to serve as callbacks for buttons print(4848484) # this function is specified as a callback when creating a MenuItem below if __name__ == "__main__": app = rumps.App("My Toolbar App", title='World, Hello') app.menu = [ rumps.MenuItem('About', icon='pony.jpg', dimensions=(18, 18)), # can specify an icon to be placed near text 'Preferences', None, # None functions as a separator in your menu {'Arbitrary': {"Depth": ["Menus", "It's pretty easy"], "And doesn't": ["Even look like Objective C", rumps.MenuItem("One bit", callback=onebitcallback)]}}, None ] app.run()
cirruscluster/cirruscluster
refs/heads/master
cirruscluster/ext/ansible/__init__.py
1
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. __version__ = '1.0' __author__ = 'Michael DeHaan'
pumaking/hackracer
refs/heads/master
lib/flask/sessions.py
348
# -*- coding: utf-8 -*- """ flask.sessions ~~~~~~~~~~~~~~ Implements cookie based sessions based on itsdangerous. :copyright: (c) 2012 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ import uuid import hashlib from datetime import datetime from werkzeug.http import http_date, parse_date from werkzeug.datastructures import CallbackDict from . import Markup, json from ._compat import iteritems, text_type from itsdangerous import URLSafeTimedSerializer, BadSignature def total_seconds(td): return td.days * 60 * 60 * 24 + td.seconds class SessionMixin(object): """Expands a basic dictionary with an accessors that are expected by Flask extensions and users for the session. """ def _get_permanent(self): return self.get('_permanent', False) def _set_permanent(self, value): self['_permanent'] = bool(value) #: this reflects the ``'_permanent'`` key in the dict. permanent = property(_get_permanent, _set_permanent) del _get_permanent, _set_permanent #: some session backends can tell you if a session is new, but that is #: not necessarily guaranteed. Use with caution. The default mixin #: implementation just hardcodes `False` in. new = False #: for some backends this will always be `True`, but some backends will #: default this to false and detect changes in the dictionary for as #: long as changes do not happen on mutable structures in the session. #: The default mixin implementation just hardcodes `True` in. modified = True class TaggedJSONSerializer(object): """A customized JSON serializer that supports a few extra types that we take for granted when serializing (tuples, markup objects, datetime). 
""" def dumps(self, value): def _tag(value): if isinstance(value, tuple): return {' t': [_tag(x) for x in value]} elif isinstance(value, uuid.UUID): return {' u': value.hex} elif callable(getattr(value, '__html__', None)): return {' m': text_type(value.__html__())} elif isinstance(value, list): return [_tag(x) for x in value] elif isinstance(value, datetime): return {' d': http_date(value)} elif isinstance(value, dict): return dict((k, _tag(v)) for k, v in iteritems(value)) elif isinstance(value, str): try: return text_type(value) except UnicodeError: raise UnexpectedUnicodeError(u'A byte string with ' u'non-ASCII data was passed to the session system ' u'which can only store unicode strings. Consider ' u'base64 encoding your string (String was %r)' % value) return value return json.dumps(_tag(value), separators=(',', ':')) def loads(self, value): def object_hook(obj): if len(obj) != 1: return obj the_key, the_value = next(iteritems(obj)) if the_key == ' t': return tuple(the_value) elif the_key == ' u': return uuid.UUID(the_value) elif the_key == ' m': return Markup(the_value) elif the_key == ' d': return parse_date(the_value) return obj return json.loads(value, object_hook=object_hook) session_json_serializer = TaggedJSONSerializer() class SecureCookieSession(CallbackDict, SessionMixin): """Baseclass for sessions based on signed cookies.""" def __init__(self, initial=None): def on_update(self): self.modified = True CallbackDict.__init__(self, initial, on_update) self.modified = False class NullSession(SecureCookieSession): """Class used to generate nicer error messages if sessions are not available. Will still allow read-only access to the empty session but fail on setting. """ def _fail(self, *args, **kwargs): raise RuntimeError('the session is unavailable because no secret ' 'key was set. 
Set the secret_key on the ' 'application to something unique and secret.') __setitem__ = __delitem__ = clear = pop = popitem = \ update = setdefault = _fail del _fail class SessionInterface(object): """The basic interface you have to implement in order to replace the default session interface which uses werkzeug's securecookie implementation. The only methods you have to implement are :meth:`open_session` and :meth:`save_session`, the others have useful defaults which you don't need to change. The session object returned by the :meth:`open_session` method has to provide a dictionary like interface plus the properties and methods from the :class:`SessionMixin`. We recommend just subclassing a dict and adding that mixin:: class Session(dict, SessionMixin): pass If :meth:`open_session` returns `None` Flask will call into :meth:`make_null_session` to create a session that acts as replacement if the session support cannot work because some requirement is not fulfilled. The default :class:`NullSession` class that is created will complain that the secret key was not set. To replace the session interface on an application all you have to do is to assign :attr:`flask.Flask.session_interface`:: app = Flask(__name__) app.session_interface = MySessionInterface() .. versionadded:: 0.8 """ #: :meth:`make_null_session` will look here for the class that should #: be created when a null session is requested. Likewise the #: :meth:`is_null_session` method will perform a typecheck against #: this type. null_session_class = NullSession #: A flag that indicates if the session interface is pickle based. #: This can be used by flask extensions to make a decision in regards #: to how to deal with the session object. #: #: .. versionadded:: 0.10 pickle_based = False def make_null_session(self, app): """Creates a null session which acts as a replacement object if the real session support could not be loaded due to a configuration error. 
This mainly aids the user experience because the job of the null session is to still support lookup without complaining but modifications are answered with a helpful error message of what failed. This creates an instance of :attr:`null_session_class` by default. """ return self.null_session_class() def is_null_session(self, obj): """Checks if a given object is a null session. Null sessions are not asked to be saved. This checks if the object is an instance of :attr:`null_session_class` by default. """ return isinstance(obj, self.null_session_class) def get_cookie_domain(self, app): """Helpful helper method that returns the cookie domain that should be used for the session cookie if session cookies are used. """ if app.config['SESSION_COOKIE_DOMAIN'] is not None: return app.config['SESSION_COOKIE_DOMAIN'] if app.config['SERVER_NAME'] is not None: # chop of the port which is usually not supported by browsers rv = '.' + app.config['SERVER_NAME'].rsplit(':', 1)[0] # Google chrome does not like cookies set to .localhost, so # we just go with no domain then. Flask documents anyways that # cross domain cookies need a fully qualified domain name if rv == '.localhost': rv = None # If we infer the cookie domain from the server name we need # to check if we are in a subpath. In that case we can't # set a cross domain cookie. if rv is not None: path = self.get_cookie_path(app) if path != '/': rv = rv.lstrip('.') return rv def get_cookie_path(self, app): """Returns the path for which the cookie should be valid. The default implementation uses the value from the SESSION_COOKIE_PATH`` config var if it's set, and falls back to ``APPLICATION_ROOT`` or uses ``/`` if it's `None`. """ return app.config['SESSION_COOKIE_PATH'] or \ app.config['APPLICATION_ROOT'] or '/' def get_cookie_httponly(self, app): """Returns True if the session cookie should be httponly. This currently just returns the value of the ``SESSION_COOKIE_HTTPONLY`` config var. 
""" return app.config['SESSION_COOKIE_HTTPONLY'] def get_cookie_secure(self, app): """Returns True if the cookie should be secure. This currently just returns the value of the ``SESSION_COOKIE_SECURE`` setting. """ return app.config['SESSION_COOKIE_SECURE'] def get_expiration_time(self, app, session): """A helper method that returns an expiration date for the session or `None` if the session is linked to the browser session. The default implementation returns now + the permanent session lifetime configured on the application. """ if session.permanent: return datetime.utcnow() + app.permanent_session_lifetime def open_session(self, app, request): """This method has to be implemented and must either return `None` in case the loading failed because of a configuration error or an instance of a session object which implements a dictionary like interface + the methods and attributes on :class:`SessionMixin`. """ raise NotImplementedError() def save_session(self, app, session, response): """This is called for actual sessions returned by :meth:`open_session` at the end of the request. This is still called during a request context so if you absolutely need access to the request you can do that. """ raise NotImplementedError() class SecureCookieSessionInterface(SessionInterface): """The default session interface that stores sessions in signed cookies through the :mod:`itsdangerous` module. """ #: the salt that should be applied on top of the secret key for the #: signing of cookie based sessions. salt = 'cookie-session' #: the hash function to use for the signature. The default is sha1 digest_method = staticmethod(hashlib.sha1) #: the name of the itsdangerous supported key derivation. The default #: is hmac. key_derivation = 'hmac' #: A python serializer for the payload. The default is a compact #: JSON derived serializer with support for some extra Python types #: such as datetime objects or tuples. 
serializer = session_json_serializer session_class = SecureCookieSession def get_signing_serializer(self, app): if not app.secret_key: return None signer_kwargs = dict( key_derivation=self.key_derivation, digest_method=self.digest_method ) return URLSafeTimedSerializer(app.secret_key, salt=self.salt, serializer=self.serializer, signer_kwargs=signer_kwargs) def open_session(self, app, request): s = self.get_signing_serializer(app) if s is None: return None val = request.cookies.get(app.session_cookie_name) if not val: return self.session_class() max_age = total_seconds(app.permanent_session_lifetime) try: data = s.loads(val, max_age=max_age) return self.session_class(data) except BadSignature: return self.session_class() def save_session(self, app, session, response): domain = self.get_cookie_domain(app) path = self.get_cookie_path(app) if not session: if session.modified: response.delete_cookie(app.session_cookie_name, domain=domain, path=path) return httponly = self.get_cookie_httponly(app) secure = self.get_cookie_secure(app) expires = self.get_expiration_time(app, session) val = self.get_signing_serializer(app).dumps(dict(session)) response.set_cookie(app.session_cookie_name, val, expires=expires, httponly=httponly, domain=domain, path=path, secure=secure) from flask.debughelpers import UnexpectedUnicodeError
LaMi-/pmatic
refs/heads/master
ccu_pkg/python/lib/python2.7/quopri.py
8
#! /usr/bin/python2.7 """Conversions to/from quoted-printable transport encoding as per RFC 1521.""" # (Dec 1991 version). __all__ = ["encode", "decode", "encodestring", "decodestring"] ESCAPE = '=' MAXLINESIZE = 76 HEX = '0123456789ABCDEF' EMPTYSTRING = '' try: from binascii import a2b_qp, b2a_qp except ImportError: a2b_qp = None b2a_qp = None def needsquoting(c, quotetabs, header): """Decide whether a particular character needs to be quoted. The 'quotetabs' flag indicates whether embedded tabs and spaces should be quoted. Note that line-ending tabs and spaces are always encoded, as per RFC 1521. """ if c in ' \t': return quotetabs # if header, we have to escape _ because _ is used to escape space if c == '_': return header return c == ESCAPE or not (' ' <= c <= '~') def quote(c): """Quote a single character.""" i = ord(c) return ESCAPE + HEX[i//16] + HEX[i%16] def encode(input, output, quotetabs, header = 0): """Read 'input', apply quoted-printable encoding, and write to 'output'. 'input' and 'output' are files with readline() and write() methods. The 'quotetabs' flag indicates whether embedded tabs and spaces should be quoted. Note that line-ending tabs and spaces are always encoded, as per RFC 1521. The 'header' flag indicates whether we are encoding spaces as _ as per RFC 1522. """ if b2a_qp is not None: data = input.read() odata = b2a_qp(data, quotetabs = quotetabs, header = header) output.write(odata) return def write(s, output=output, lineEnd='\n'): # RFC 1521 requires that the line ending in a space or tab must have # that trailing character encoded. 
if s and s[-1:] in ' \t': output.write(s[:-1] + quote(s[-1]) + lineEnd) elif s == '.': output.write(quote(s) + lineEnd) else: output.write(s + lineEnd) prevline = None while 1: line = input.readline() if not line: break outline = [] # Strip off any readline induced trailing newline stripped = '' if line[-1:] == '\n': line = line[:-1] stripped = '\n' # Calculate the un-length-limited encoded line for c in line: if needsquoting(c, quotetabs, header): c = quote(c) if header and c == ' ': outline.append('_') else: outline.append(c) # First, write out the previous line if prevline is not None: write(prevline) # Now see if we need any soft line breaks because of RFC-imposed # length limitations. Then do the thisline->prevline dance. thisline = EMPTYSTRING.join(outline) while len(thisline) > MAXLINESIZE: # Don't forget to include the soft line break `=' sign in the # length calculation! write(thisline[:MAXLINESIZE-1], lineEnd='=\n') thisline = thisline[MAXLINESIZE-1:] # Write out the current line prevline = thisline # Write out the last line, without a trailing newline if prevline is not None: write(prevline, lineEnd=stripped) def encodestring(s, quotetabs = 0, header = 0): if b2a_qp is not None: return b2a_qp(s, quotetabs = quotetabs, header = header) from cStringIO import StringIO infp = StringIO(s) outfp = StringIO() encode(infp, outfp, quotetabs, header) return outfp.getvalue() def decode(input, output, header = 0): """Read 'input', apply quoted-printable decoding, and write to 'output'. 'input' and 'output' are files with readline() and write() methods. 
If 'header' is true, decode underscore as space (per RFC 1522).""" if a2b_qp is not None: data = input.read() odata = a2b_qp(data, header = header) output.write(odata) return new = '' while 1: line = input.readline() if not line: break i, n = 0, len(line) if n > 0 and line[n-1] == '\n': partial = 0; n = n-1 # Strip trailing whitespace while n > 0 and line[n-1] in " \t\r": n = n-1 else: partial = 1 while i < n: c = line[i] if c == '_' and header: new = new + ' '; i = i+1 elif c != ESCAPE: new = new + c; i = i+1 elif i+1 == n and not partial: partial = 1; break elif i+1 < n and line[i+1] == ESCAPE: new = new + ESCAPE; i = i+2 elif i+2 < n and ishex(line[i+1]) and ishex(line[i+2]): new = new + chr(unhex(line[i+1:i+3])); i = i+3 else: # Bad escape sequence -- leave it in new = new + c; i = i+1 if not partial: output.write(new + '\n') new = '' if new: output.write(new) def decodestring(s, header = 0): if a2b_qp is not None: return a2b_qp(s, header = header) from cStringIO import StringIO infp = StringIO(s) outfp = StringIO() decode(infp, outfp, header = header) return outfp.getvalue() # Other helper functions def ishex(c): """Return true if the character 'c' is a hexadecimal digit.""" return '0' <= c <= '9' or 'a' <= c <= 'f' or 'A' <= c <= 'F' def unhex(s): """Get the integer value of a hexadecimal number.""" bits = 0 for c in s: if '0' <= c <= '9': i = ord('0') elif 'a' <= c <= 'f': i = ord('a')-10 elif 'A' <= c <= 'F': i = ord('A')-10 else: break bits = bits*16 + (ord(c) - i) return bits def main(): import sys import getopt try: opts, args = getopt.getopt(sys.argv[1:], 'td') except getopt.error, msg: sys.stdout = sys.stderr print msg print "usage: quopri [-t | -d] [file] ..." 
print "-t: quote tabs" print "-d: decode; default encode" sys.exit(2) deco = 0 tabs = 0 for o, a in opts: if o == '-t': tabs = 1 if o == '-d': deco = 1 if tabs and deco: sys.stdout = sys.stderr print "-t and -d are mutually exclusive" sys.exit(2) if not args: args = ['-'] sts = 0 for file in args: if file == '-': fp = sys.stdin else: try: fp = open(file) except IOError, msg: sys.stderr.write("%s: can't open (%s)\n" % (file, msg)) sts = 1 continue if deco: decode(fp, sys.stdout) else: encode(fp, sys.stdout, tabs) if fp is not sys.stdin: fp.close() if sts: sys.exit(sts) if __name__ == '__main__': main()
codeinthehole/purl
refs/heads/master
purl/template.py
1
import re import functools try: from urllib.parse import quote except ImportError: # Python 2 from urllib import quote from . import url __all__ = ['Template', 'expand'] patterns = re.compile(r"{([^\}]+)}") class Template(object): def __init__(self, url_str): self._base = url_str def __str__(self): return 'Template: %s' % self._base def expand(self, variables=None): return url.URL(expand(self._base, variables)) def expand(template, variables=None): """ Expand a URL template string using the passed variables """ if variables is None: variables = {} return patterns.sub(functools.partial(_replace, variables), template) # Utils def _flatten(container): """ _flatten a sequence of sequences into a single list """ _flattened = [] for sequence in container: _flattened.extend(sequence) return _flattened # Format functions # ---------------- # These are responsible for formatting the (key, value) pair into a string def _format_pair_no_equals(explode, separator, escape, key, value): """ Format a key, value pair but don't include the equals sign when there is no value """ if not value: return key return _format_pair(explode, separator, escape, key, value) def _format_pair_with_equals(explode, separator, escape, key, value): """ Format a key, value pair including the equals sign when there is no value """ if not value: return key + '=' return _format_pair(explode, separator, escape, key, value) def _format_pair(explode, separator, escape, key, value): if isinstance(value, (list, tuple)): join_char = "," if explode: join_char = separator try: dict(value) except: # Scalar container if explode: items = ["%s=%s" % (key, escape(v)) for v in value] return join_char.join(items) else: escaped_value = join_char.join(map(escape, value)) else: # Tuple container if explode: items = ["%s=%s" % (k, escape(v)) for (k, v) in value] return join_char.join(items) else: items = _flatten(value) escaped_value = join_char.join(map(escape, items)) else: escaped_value = escape(value) return '%s=%s' % 
(key, escaped_value) def _format_default(explode, separator, escape, key, value): if isinstance(value, (list, tuple)): join_char = "," if explode: join_char = separator try: dict(value) except: # Scalar container escaped_value = join_char.join(map(escape, value)) else: # Tuple container if explode: items = ["%s=%s" % (k, escape(v)) for (k, v) in value] escaped_value = join_char.join(items) else: items = _flatten(value) escaped_value = join_char.join(map(escape, items)) else: escaped_value = escape(value) return escaped_value # Modifer functions # ----------------- # These are responsible for modifying the variable before formatting _identity = lambda x: x def _truncate(string, num_chars): return string[:num_chars] # Splitting functions # ------------------- # These are responsible for splitting a string into a sequence of (key, # modifier) tuples def _split_basic(string): """ Split a string into a list of tuples of the form (key, modifier_fn, explode) where modifier_fn is a function that applies the appropriate modification to the variable. 
""" tuples = [] for word in string.split(','): # Attempt to split on colon parts = word.split(':', 2) key, modifier_fn, explode = parts[0], _identity, False if len(parts) > 1: modifier_fn = functools.partial( _truncate, num_chars=int(parts[1])) if word[len(word) - 1] == '*': key = word[:len(word) - 1] explode = True tuples.append((key, modifier_fn, explode)) return tuples def _split_operator(string): return _split_basic(string[1:]) # Escaping functions # ------------------ def _escape_all(value): return url.unicode_quote(value, safe="") def _escape_reserved(value): return url.unicode_quote(value, safe="/!,.;") # Operator map # ------------ # A mapping of: # operator -> (prefix, separator, split_fn, escape_fn, format_fn) operator_map = { '+': ('', ',', _split_operator, _escape_reserved, _format_default), '#': ('#', ',', _split_operator, _escape_reserved, _format_default), '.': ('.', '.', _split_operator, _escape_all, _format_default), '/': ('/', '/', _split_operator, _escape_all, _format_default), ';': (';', ';', _split_operator, _escape_all, _format_pair_no_equals), '?': ('?', '&', _split_operator, _escape_all, _format_pair_with_equals), '&': ('&', '&', _split_operator, _escape_all, _format_pair_with_equals), } defaults = ('', ',', _split_basic, _escape_all, _format_default) def _replace(variables, match): """ Return the appropriate replacement for `match` using the passed variables """ expression = match.group(1) # Look-up chars and functions for the specified operator (prefix_char, separator_char, split_fn, escape_fn, format_fn) = operator_map.get(expression[0], defaults) replacements = [] for key, modify_fn, explode in split_fn(expression): if key in variables: variable = modify_fn(variables[key]) replacement = format_fn( explode, separator_char, escape_fn, key, variable) replacements.append(replacement) if not replacements: return '' return prefix_char + separator_char.join(replacements)
akail/fiplanner
refs/heads/master
tests/__init__.py
14224
# -*- coding: utf-8 -*-
aviweit/libcloud
refs/heads/trunk
libcloud/test/compute/test_base.py
42
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import unittest from libcloud.common.base import Response from libcloud.common.base import Connection, ConnectionKey, ConnectionUserAndKey from libcloud.common.types import LibcloudError from libcloud.compute.base import Node, NodeSize, NodeImage, NodeDriver, StorageVolume from libcloud.compute.base import NodeAuthSSHKey, NodeAuthPassword from libcloud.compute.types import StorageVolumeState from libcloud.test import MockResponse # pylint: disable-msg=E0611 class FakeDriver(object): type = 0 class BaseTests(unittest.TestCase): def test_base_node(self): Node(id=0, name=0, state=0, public_ips=0, private_ips=0, driver=FakeDriver()) def test_base_node_size(self): NodeSize(id=0, name=0, ram=0, disk=0, bandwidth=0, price=0, driver=FakeDriver()) def test_base_node_image(self): NodeImage(id=0, name=0, driver=FakeDriver()) def test_base_storage_volume(self): StorageVolume(id="0", name="0", size=10, driver=FakeDriver(), state=StorageVolumeState.AVAILABLE) def test_base_response(self): Response(MockResponse(status=200, body='foo'), ConnectionKey('foo')) def test_base_node_driver(self): NodeDriver('foo') def test_base_connection_key(self): ConnectionKey('foo') def test_base_connection_userkey(self): 
ConnectionUserAndKey('foo', 'bar') def test_base_connection_timeout(self): Connection(timeout=10) class TestValidateAuth(unittest.TestCase): def test_get_auth_ssh(self): n = NodeDriver('foo') n.features = {'create_node': ['ssh_key']} auth = NodeAuthSSHKey('pubkey...') self.assertEqual(auth, n._get_and_check_auth(auth)) def test_get_auth_ssh_but_given_password(self): n = NodeDriver('foo') n.features = {'create_node': ['ssh_key']} auth = NodeAuthPassword('password') self.assertRaises(LibcloudError, n._get_and_check_auth, auth) def test_get_auth_password(self): n = NodeDriver('foo') n.features = {'create_node': ['password']} auth = NodeAuthPassword('password') self.assertEqual(auth, n._get_and_check_auth(auth)) def test_get_auth_password_but_given_ssh_key(self): n = NodeDriver('foo') n.features = {'create_node': ['password']} auth = NodeAuthSSHKey('publickey') self.assertRaises(LibcloudError, n._get_and_check_auth, auth) def test_get_auth_default_ssh_key(self): n = NodeDriver('foo') n.features = {'create_node': ['ssh_key']} self.assertEqual(None, n._get_and_check_auth(None)) def test_get_auth_default_password(self): n = NodeDriver('foo') n.features = {'create_node': ['password']} auth = n._get_and_check_auth(None) self.assertTrue(isinstance(auth, NodeAuthPassword)) def test_get_auth_default_no_feature(self): n = NodeDriver('foo') self.assertEqual(None, n._get_and_check_auth(None)) def test_get_auth_generates_password_but_given_nonsense(self): n = NodeDriver('foo') n.features = {'create_node': ['generates_password']} auth = "nonsense" self.assertRaises(LibcloudError, n._get_and_check_auth, auth) def test_get_auth_no_features_but_given_nonsense(self): n = NodeDriver('foo') auth = "nonsense" self.assertRaises(LibcloudError, n._get_and_check_auth, auth) if __name__ == '__main__': sys.exit(unittest.main())
victor-prado/broker-manager
refs/heads/master
environment/lib/python3.5/site-packages/pandas/tests/plotting/test_series.py
7
#!/usr/bin/env python # coding: utf-8 import nose import itertools from datetime import datetime import pandas as pd from pandas import Series, DataFrame, date_range from pandas.compat import range, lrange import pandas.util.testing as tm from pandas.util.testing import slow import numpy as np from numpy.random import randn import pandas.tools.plotting as plotting from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works, _skip_if_no_scipy_gaussian_kde, _ok_for_gaussian_kde) """ Test cases for Series.plot """ @tm.mplskip class TestSeriesPlots(TestPlotBase): def setUp(self): TestPlotBase.setUp(self) import matplotlib as mpl mpl.rcdefaults() self.ts = tm.makeTimeSeries() self.ts.name = 'ts' self.series = tm.makeStringSeries() self.series.name = 'series' self.iseries = tm.makePeriodSeries() self.iseries.name = 'iseries' @slow def test_plot(self): _check_plot_works(self.ts.plot, label='foo') _check_plot_works(self.ts.plot, use_index=False) axes = _check_plot_works(self.ts.plot, rot=0) self._check_ticks_props(axes, xrot=0) ax = _check_plot_works(self.ts.plot, style='.', logy=True) self._check_ax_scales(ax, yaxis='log') ax = _check_plot_works(self.ts.plot, style='.', logx=True) self._check_ax_scales(ax, xaxis='log') ax = _check_plot_works(self.ts.plot, style='.', loglog=True) self._check_ax_scales(ax, xaxis='log', yaxis='log') _check_plot_works(self.ts[:10].plot.bar) _check_plot_works(self.ts.plot.area, stacked=False) _check_plot_works(self.iseries.plot) for kind in ['line', 'bar', 'barh', 'kde', 'hist', 'box']: if not _ok_for_gaussian_kde(kind): continue _check_plot_works(self.series[:5].plot, kind=kind) _check_plot_works(self.series[:10].plot.barh) ax = _check_plot_works(Series(randn(10)).plot.bar, color='black') self._check_colors([ax.patches[0]], facecolors=['black']) # GH 6951 ax = _check_plot_works(self.ts.plot, subplots=True) self._check_axes_shape(ax, axes_num=1, layout=(1, 1)) ax = _check_plot_works(self.ts.plot, subplots=True, layout=(-1, 1)) 
self._check_axes_shape(ax, axes_num=1, layout=(1, 1)) ax = _check_plot_works(self.ts.plot, subplots=True, layout=(1, -1)) self._check_axes_shape(ax, axes_num=1, layout=(1, 1)) @slow def test_plot_figsize_and_title(self): # figsize and title ax = self.series.plot(title='Test', figsize=(16, 8)) self._check_text_labels(ax.title, 'Test') self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16, 8)) def test_dont_modify_rcParams(self): # GH 8242 if self.mpl_ge_1_5_0: key = 'axes.prop_cycle' else: key = 'axes.color_cycle' colors = self.plt.rcParams[key] Series([1, 2, 3]).plot() self.assertEqual(colors, self.plt.rcParams[key]) def test_ts_line_lim(self): ax = self.ts.plot() xmin, xmax = ax.get_xlim() lines = ax.get_lines() self.assertEqual(xmin, lines[0].get_data(orig=False)[0][0]) self.assertEqual(xmax, lines[0].get_data(orig=False)[0][-1]) tm.close() ax = self.ts.plot(secondary_y=True) xmin, xmax = ax.get_xlim() lines = ax.get_lines() self.assertEqual(xmin, lines[0].get_data(orig=False)[0][0]) self.assertEqual(xmax, lines[0].get_data(orig=False)[0][-1]) def test_ts_area_lim(self): ax = self.ts.plot.area(stacked=False) xmin, xmax = ax.get_xlim() line = ax.get_lines()[0].get_data(orig=False)[0] self.assertEqual(xmin, line[0]) self.assertEqual(xmax, line[-1]) tm.close() # GH 7471 ax = self.ts.plot.area(stacked=False, x_compat=True) xmin, xmax = ax.get_xlim() line = ax.get_lines()[0].get_data(orig=False)[0] self.assertEqual(xmin, line[0]) self.assertEqual(xmax, line[-1]) tm.close() tz_ts = self.ts.copy() tz_ts.index = tz_ts.tz_localize('GMT').tz_convert('CET') ax = tz_ts.plot.area(stacked=False, x_compat=True) xmin, xmax = ax.get_xlim() line = ax.get_lines()[0].get_data(orig=False)[0] self.assertEqual(xmin, line[0]) self.assertEqual(xmax, line[-1]) tm.close() ax = tz_ts.plot.area(stacked=False, secondary_y=True) xmin, xmax = ax.get_xlim() line = ax.get_lines()[0].get_data(orig=False)[0] self.assertEqual(xmin, line[0]) self.assertEqual(xmax, line[-1]) def 
test_label(self): s = Series([1, 2]) ax = s.plot(label='LABEL', legend=True) self._check_legend_labels(ax, labels=['LABEL']) self.plt.close() ax = s.plot(legend=True) self._check_legend_labels(ax, labels=['None']) self.plt.close() # get name from index s.name = 'NAME' ax = s.plot(legend=True) self._check_legend_labels(ax, labels=['NAME']) self.plt.close() # override the default ax = s.plot(legend=True, label='LABEL') self._check_legend_labels(ax, labels=['LABEL']) self.plt.close() # Add lebel info, but don't draw ax = s.plot(legend=False, label='LABEL') self.assertEqual(ax.get_legend(), None) # Hasn't been drawn ax.legend() # draw it self._check_legend_labels(ax, labels=['LABEL']) def test_line_area_nan_series(self): values = [1, 2, np.nan, 3] s = Series(values) ts = Series(values, index=tm.makeDateIndex(k=4)) for d in [s, ts]: ax = _check_plot_works(d.plot) masked = ax.lines[0].get_ydata() # remove nan for comparison purpose exp = np.array([1, 2, 3], dtype=np.float64) self.assert_numpy_array_equal(np.delete(masked.data, 2), exp) self.assert_numpy_array_equal( masked.mask, np.array([False, False, True, False])) expected = np.array([1, 2, 0, 3], dtype=np.float64) ax = _check_plot_works(d.plot, stacked=True) self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected) ax = _check_plot_works(d.plot.area) self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected) ax = _check_plot_works(d.plot.area, stacked=False) self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected) def test_line_use_index_false(self): s = Series([1, 2, 3], index=['a', 'b', 'c']) s.index.name = 'The Index' ax = s.plot(use_index=False) label = ax.get_xlabel() self.assertEqual(label, '') ax2 = s.plot.bar(use_index=False) label2 = ax2.get_xlabel() self.assertEqual(label2, '') @slow def test_bar_log(self): expected = np.array([1., 10., 100., 1000.]) if not self.mpl_le_1_2_1: expected = np.hstack((.1, expected, 1e4)) ax = Series([200, 500]).plot.bar(log=True) 
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected) tm.close() ax = Series([200, 500]).plot.barh(log=True) tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected) tm.close() # GH 9905 expected = np.array([1.0e-03, 1.0e-02, 1.0e-01, 1.0e+00]) if not self.mpl_le_1_2_1: expected = np.hstack((1.0e-04, expected, 1.0e+01)) if self.mpl_ge_2_0_0: expected = np.hstack((1.0e-05, expected)) ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='bar') ymin = 0.0007943282347242822 if self.mpl_ge_2_0_0 else 0.001 ymax = 0.12589254117941673 if self.mpl_ge_2_0_0 else .10000000000000001 res = ax.get_ylim() self.assertAlmostEqual(res[0], ymin) self.assertAlmostEqual(res[1], ymax) tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected) tm.close() ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='barh') res = ax.get_xlim() self.assertAlmostEqual(res[0], ymin) self.assertAlmostEqual(res[1], ymax) tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected) @slow def test_bar_ignore_index(self): df = Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd']) ax = df.plot.bar(use_index=False) self._check_text_labels(ax.get_xticklabels(), ['0', '1', '2', '3']) def test_rotation(self): df = DataFrame(randn(5, 5)) # Default rot 0 axes = df.plot() self._check_ticks_props(axes, xrot=0) axes = df.plot(rot=30) self._check_ticks_props(axes, xrot=30) def test_irregular_datetime(self): rng = date_range('1/1/2000', '3/1/2000') rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]] ser = Series(randn(len(rng)), rng) ax = ser.plot() xp = datetime(1999, 1, 1).toordinal() ax.set_xlim('1/1/1999', '1/1/2001') self.assertEqual(xp, ax.get_xlim()[0]) @slow def test_pie_series(self): # if sum of values is less than 1.0, pie handle them as rate and draw # semicircle. 
series = Series(np.random.randint(1, 5), index=['a', 'b', 'c', 'd', 'e'], name='YLABEL') ax = _check_plot_works(series.plot.pie) self._check_text_labels(ax.texts, series.index) self.assertEqual(ax.get_ylabel(), 'YLABEL') # without wedge labels ax = _check_plot_works(series.plot.pie, labels=None) self._check_text_labels(ax.texts, [''] * 5) # with less colors than elements color_args = ['r', 'g', 'b'] ax = _check_plot_works(series.plot.pie, colors=color_args) color_expected = ['r', 'g', 'b', 'r', 'g'] self._check_colors(ax.patches, facecolors=color_expected) # with labels and colors labels = ['A', 'B', 'C', 'D', 'E'] color_args = ['r', 'g', 'b', 'c', 'm'] ax = _check_plot_works(series.plot.pie, labels=labels, colors=color_args) self._check_text_labels(ax.texts, labels) self._check_colors(ax.patches, facecolors=color_args) # with autopct and fontsize ax = _check_plot_works(series.plot.pie, colors=color_args, autopct='%.2f', fontsize=7) pcts = ['{0:.2f}'.format(s * 100) for s in series.values / float(series.sum())] iters = [iter(series.index), iter(pcts)] expected_texts = list(next(it) for it in itertools.cycle(iters)) self._check_text_labels(ax.texts, expected_texts) for t in ax.texts: self.assertEqual(t.get_fontsize(), 7) # includes negative value with tm.assertRaises(ValueError): series = Series([1, 2, 0, 4, -1], index=['a', 'b', 'c', 'd', 'e']) series.plot.pie() # includes nan series = Series([1, 2, np.nan, 4], index=['a', 'b', 'c', 'd'], name='YLABEL') ax = _check_plot_works(series.plot.pie) self._check_text_labels(ax.texts, ['a', 'b', '', 'd']) def test_pie_nan(self): s = Series([1, np.nan, 1, 1]) ax = s.plot.pie(legend=True) expected = ['0', '', '2', '3'] result = [x.get_text() for x in ax.texts] self.assertEqual(result, expected) @slow def test_hist_df_kwargs(self): df = DataFrame(np.random.randn(10, 2)) ax = df.plot.hist(bins=5) self.assertEqual(len(ax.patches), 10) @slow def test_hist_df_with_nonnumerics(self): # GH 9853 with tm.RNGContext(1): df = DataFrame( 
np.random.randn(10, 4), columns=['A', 'B', 'C', 'D']) df['E'] = ['x', 'y'] * 5 ax = df.plot.hist(bins=5) self.assertEqual(len(ax.patches), 20) ax = df.plot.hist() # bins=10 self.assertEqual(len(ax.patches), 40) @slow def test_hist_legacy(self): _check_plot_works(self.ts.hist) _check_plot_works(self.ts.hist, grid=False) _check_plot_works(self.ts.hist, figsize=(8, 10)) # _check_plot_works adds an ax so catch warning. see GH #13188 with tm.assert_produces_warning(UserWarning): _check_plot_works(self.ts.hist, by=self.ts.index.month) with tm.assert_produces_warning(UserWarning): _check_plot_works(self.ts.hist, by=self.ts.index.month, bins=5) fig, ax = self.plt.subplots(1, 1) _check_plot_works(self.ts.hist, ax=ax) _check_plot_works(self.ts.hist, ax=ax, figure=fig) _check_plot_works(self.ts.hist, figure=fig) tm.close() fig, (ax1, ax2) = self.plt.subplots(1, 2) _check_plot_works(self.ts.hist, figure=fig, ax=ax1) _check_plot_works(self.ts.hist, figure=fig, ax=ax2) with tm.assertRaises(ValueError): self.ts.hist(by=self.ts.index, figure=fig) @slow def test_hist_bins_legacy(self): df = DataFrame(np.random.randn(10, 2)) ax = df.hist(bins=2)[0][0] self.assertEqual(len(ax.patches), 2) @slow def test_hist_layout(self): df = self.hist_df with tm.assertRaises(ValueError): df.height.hist(layout=(1, 1)) with tm.assertRaises(ValueError): df.height.hist(layout=[1, 1]) @slow def test_hist_layout_with_by(self): df = self.hist_df # _check_plot_works adds an ax so catch warning. 
see GH #13188 with tm.assert_produces_warning(UserWarning): axes = _check_plot_works(df.height.hist, by=df.gender, layout=(2, 1)) self._check_axes_shape(axes, axes_num=2, layout=(2, 1)) with tm.assert_produces_warning(UserWarning): axes = _check_plot_works(df.height.hist, by=df.gender, layout=(3, -1)) self._check_axes_shape(axes, axes_num=2, layout=(3, 1)) with tm.assert_produces_warning(UserWarning): axes = _check_plot_works(df.height.hist, by=df.category, layout=(4, 1)) self._check_axes_shape(axes, axes_num=4, layout=(4, 1)) with tm.assert_produces_warning(UserWarning): axes = _check_plot_works(df.height.hist, by=df.category, layout=(2, -1)) self._check_axes_shape(axes, axes_num=4, layout=(2, 2)) with tm.assert_produces_warning(UserWarning): axes = _check_plot_works(df.height.hist, by=df.category, layout=(3, -1)) self._check_axes_shape(axes, axes_num=4, layout=(3, 2)) with tm.assert_produces_warning(UserWarning): axes = _check_plot_works(df.height.hist, by=df.category, layout=(-1, 4)) self._check_axes_shape(axes, axes_num=4, layout=(1, 4)) with tm.assert_produces_warning(UserWarning): axes = _check_plot_works(df.height.hist, by=df.classroom, layout=(2, 2)) self._check_axes_shape(axes, axes_num=3, layout=(2, 2)) axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7)) self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 7)) @slow def test_hist_no_overlap(self): from matplotlib.pyplot import subplot, gcf x = Series(randn(2)) y = Series(randn(2)) subplot(121) x.hist() subplot(122) y.hist() fig = gcf() axes = fig.axes if self.mpl_ge_1_5_0 else fig.get_axes() self.assertEqual(len(axes), 2) @slow def test_hist_secondary_legend(self): # GH 9610 df = DataFrame(np.random.randn(30, 4), columns=list('abcd')) # primary -> secondary ax = df['a'].plot.hist(legend=True) df['b'].plot.hist(ax=ax, legend=True, secondary_y=True) # both legends are dran on left ax # left and right axis must be visible self._check_legend_labels(ax, labels=['a', 'b 
(right)']) self.assertTrue(ax.get_yaxis().get_visible()) self.assertTrue(ax.right_ax.get_yaxis().get_visible()) tm.close() # secondary -> secondary ax = df['a'].plot.hist(legend=True, secondary_y=True) df['b'].plot.hist(ax=ax, legend=True, secondary_y=True) # both legends are draw on left ax # left axis must be invisible, right axis must be visible self._check_legend_labels(ax.left_ax, labels=['a (right)', 'b (right)']) self.assertFalse(ax.left_ax.get_yaxis().get_visible()) self.assertTrue(ax.get_yaxis().get_visible()) tm.close() # secondary -> primary ax = df['a'].plot.hist(legend=True, secondary_y=True) # right axes is returned df['b'].plot.hist(ax=ax, legend=True) # both legends are draw on left ax # left and right axis must be visible self._check_legend_labels(ax.left_ax, labels=['a (right)', 'b']) self.assertTrue(ax.left_ax.get_yaxis().get_visible()) self.assertTrue(ax.get_yaxis().get_visible()) tm.close() @slow def test_df_series_secondary_legend(self): # GH 9779 df = DataFrame(np.random.randn(30, 3), columns=list('abc')) s = Series(np.random.randn(30), name='x') # primary -> secondary (without passing ax) ax = df.plot() s.plot(legend=True, secondary_y=True) # both legends are dran on left ax # left and right axis must be visible self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)']) self.assertTrue(ax.get_yaxis().get_visible()) self.assertTrue(ax.right_ax.get_yaxis().get_visible()) tm.close() # primary -> secondary (with passing ax) ax = df.plot() s.plot(ax=ax, legend=True, secondary_y=True) # both legends are dran on left ax # left and right axis must be visible self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)']) self.assertTrue(ax.get_yaxis().get_visible()) self.assertTrue(ax.right_ax.get_yaxis().get_visible()) tm.close() # seconcary -> secondary (without passing ax) ax = df.plot(secondary_y=True) s.plot(legend=True, secondary_y=True) # both legends are dran on left ax # left axis must be invisible and right axis must be visible 
expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)'] self._check_legend_labels(ax.left_ax, labels=expected) self.assertFalse(ax.left_ax.get_yaxis().get_visible()) self.assertTrue(ax.get_yaxis().get_visible()) tm.close() # secondary -> secondary (with passing ax) ax = df.plot(secondary_y=True) s.plot(ax=ax, legend=True, secondary_y=True) # both legends are dran on left ax # left axis must be invisible and right axis must be visible expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)'] self._check_legend_labels(ax.left_ax, expected) self.assertFalse(ax.left_ax.get_yaxis().get_visible()) self.assertTrue(ax.get_yaxis().get_visible()) tm.close() # secondary -> secondary (with passing ax) ax = df.plot(secondary_y=True, mark_right=False) s.plot(ax=ax, legend=True, secondary_y=True) # both legends are dran on left ax # left axis must be invisible and right axis must be visible expected = ['a', 'b', 'c', 'x (right)'] self._check_legend_labels(ax.left_ax, expected) self.assertFalse(ax.left_ax.get_yaxis().get_visible()) self.assertTrue(ax.get_yaxis().get_visible()) tm.close() @slow def test_plot_fails_with_dupe_color_and_style(self): x = Series(randn(2)) with tm.assertRaises(ValueError): x.plot(style='k--', color='k') @slow def test_hist_kde(self): ax = self.ts.plot.hist(logy=True) self._check_ax_scales(ax, yaxis='log') xlabels = ax.get_xticklabels() # ticks are values, thus ticklabels are blank self._check_text_labels(xlabels, [''] * len(xlabels)) ylabels = ax.get_yticklabels() self._check_text_labels(ylabels, [''] * len(ylabels)) tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() _check_plot_works(self.ts.plot.kde) _check_plot_works(self.ts.plot.density) ax = self.ts.plot.kde(logy=True) self._check_ax_scales(ax, yaxis='log') xlabels = ax.get_xticklabels() self._check_text_labels(xlabels, [''] * len(xlabels)) ylabels = ax.get_yticklabels() self._check_text_labels(ylabels, [''] * len(ylabels)) @slow def test_kde_kwargs(self): tm._skip_if_no_scipy() 
_skip_if_no_scipy_gaussian_kde() from numpy import linspace _check_plot_works(self.ts.plot.kde, bw_method=.5, ind=linspace(-100, 100, 20)) _check_plot_works(self.ts.plot.density, bw_method=.5, ind=linspace(-100, 100, 20)) ax = self.ts.plot.kde(logy=True, bw_method=.5, ind=linspace(-100, 100, 20)) self._check_ax_scales(ax, yaxis='log') self._check_text_labels(ax.yaxis.get_label(), 'Density') @slow def test_kde_missing_vals(self): tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() s = Series(np.random.uniform(size=50)) s[0] = np.nan axes = _check_plot_works(s.plot.kde) # check if the values have any missing values # GH14821 self.assertTrue(any(~np.isnan(axes.lines[0].get_xdata())), msg='Missing Values not dropped') @slow def test_hist_kwargs(self): ax = self.ts.plot.hist(bins=5) self.assertEqual(len(ax.patches), 5) self._check_text_labels(ax.yaxis.get_label(), 'Frequency') tm.close() if self.mpl_ge_1_3_1: ax = self.ts.plot.hist(orientation='horizontal') self._check_text_labels(ax.xaxis.get_label(), 'Frequency') tm.close() ax = self.ts.plot.hist(align='left', stacked=True) tm.close() @slow def test_hist_kde_color(self): ax = self.ts.plot.hist(logy=True, bins=10, color='b') self._check_ax_scales(ax, yaxis='log') self.assertEqual(len(ax.patches), 10) self._check_colors(ax.patches, facecolors=['b'] * 10) tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() ax = self.ts.plot.kde(logy=True, color='r') self._check_ax_scales(ax, yaxis='log') lines = ax.get_lines() self.assertEqual(len(lines), 1) self._check_colors(lines, ['r']) @slow def test_boxplot_series(self): ax = self.ts.plot.box(logy=True) self._check_ax_scales(ax, yaxis='log') xlabels = ax.get_xticklabels() self._check_text_labels(xlabels, [self.ts.name]) ylabels = ax.get_yticklabels() self._check_text_labels(ylabels, [''] * len(ylabels)) @slow def test_kind_both_ways(self): s = Series(range(3)) for kind in plotting._common_kinds + plotting._series_kinds: if not _ok_for_gaussian_kde(kind): continue 
s.plot(kind=kind) getattr(s.plot, kind)() @slow def test_invalid_plot_data(self): s = Series(list('abcd')) for kind in plotting._common_kinds: if not _ok_for_gaussian_kde(kind): continue with tm.assertRaises(TypeError): s.plot(kind=kind) @slow def test_valid_object_plot(self): s = Series(lrange(10), dtype=object) for kind in plotting._common_kinds: if not _ok_for_gaussian_kde(kind): continue _check_plot_works(s.plot, kind=kind) def test_partially_invalid_plot_data(self): s = Series(['a', 'b', 1.0, 2]) for kind in plotting._common_kinds: if not _ok_for_gaussian_kde(kind): continue with tm.assertRaises(TypeError): s.plot(kind=kind) def test_invalid_kind(self): s = Series([1, 2]) with tm.assertRaises(ValueError): s.plot(kind='aasdf') @slow def test_dup_datetime_index_plot(self): dr1 = date_range('1/1/2009', periods=4) dr2 = date_range('1/2/2009', periods=4) index = dr1.append(dr2) values = randn(index.size) s = Series(values, index=index) _check_plot_works(s.plot) @slow def test_errorbar_plot(self): s = Series(np.arange(10), name='x') s_err = np.random.randn(10) d_err = DataFrame(randn(10, 2), index=s.index, columns=['x', 'y']) # test line and bar plots kinds = ['line', 'bar'] for kind in kinds: ax = _check_plot_works(s.plot, yerr=Series(s_err), kind=kind) self._check_has_errorbars(ax, xerr=0, yerr=1) ax = _check_plot_works(s.plot, yerr=s_err, kind=kind) self._check_has_errorbars(ax, xerr=0, yerr=1) ax = _check_plot_works(s.plot, yerr=s_err.tolist(), kind=kind) self._check_has_errorbars(ax, xerr=0, yerr=1) ax = _check_plot_works(s.plot, yerr=d_err, kind=kind) self._check_has_errorbars(ax, xerr=0, yerr=1) ax = _check_plot_works(s.plot, xerr=0.2, yerr=0.2, kind=kind) self._check_has_errorbars(ax, xerr=1, yerr=1) ax = _check_plot_works(s.plot, xerr=s_err) self._check_has_errorbars(ax, xerr=1, yerr=0) # test time series plotting ix = date_range('1/1/2000', '1/1/2001', freq='M') ts = Series(np.arange(12), index=ix, name='x') ts_err = Series(np.random.randn(12), index=ix) 
td_err = DataFrame(randn(12, 2), index=ix, columns=['x', 'y']) ax = _check_plot_works(ts.plot, yerr=ts_err) self._check_has_errorbars(ax, xerr=0, yerr=1) ax = _check_plot_works(ts.plot, yerr=td_err) self._check_has_errorbars(ax, xerr=0, yerr=1) # check incorrect lengths and types with tm.assertRaises(ValueError): s.plot(yerr=np.arange(11)) s_err = ['zzz'] * 10 # in mpl 1.5+ this is a TypeError with tm.assertRaises((ValueError, TypeError)): s.plot(yerr=s_err) def test_table(self): _check_plot_works(self.series.plot, table=True) _check_plot_works(self.series.plot, table=self.series) @slow def test_series_grid_settings(self): # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792 self._check_grid_settings(Series([1, 2, 3]), plotting._series_kinds + plotting._common_kinds) @slow def test_standard_colors(self): for c in ['r', 'red', 'green', '#FF0000']: result = plotting._get_standard_colors(1, color=c) self.assertEqual(result, [c]) result = plotting._get_standard_colors(1, color=[c]) self.assertEqual(result, [c]) result = plotting._get_standard_colors(3, color=c) self.assertEqual(result, [c] * 3) result = plotting._get_standard_colors(3, color=[c]) self.assertEqual(result, [c] * 3) @slow def test_standard_colors_all(self): import matplotlib.colors as colors # multiple colors like mediumaquamarine for c in colors.cnames: result = plotting._get_standard_colors(num_colors=1, color=c) self.assertEqual(result, [c]) result = plotting._get_standard_colors(num_colors=1, color=[c]) self.assertEqual(result, [c]) result = plotting._get_standard_colors(num_colors=3, color=c) self.assertEqual(result, [c] * 3) result = plotting._get_standard_colors(num_colors=3, color=[c]) self.assertEqual(result, [c] * 3) # single letter colors like k for c in colors.ColorConverter.colors: result = plotting._get_standard_colors(num_colors=1, color=c) self.assertEqual(result, [c]) result = plotting._get_standard_colors(num_colors=1, color=[c]) self.assertEqual(result, [c]) result = 
plotting._get_standard_colors(num_colors=3, color=c) self.assertEqual(result, [c] * 3) result = plotting._get_standard_colors(num_colors=3, color=[c]) self.assertEqual(result, [c] * 3) def test_series_plot_color_kwargs(self): # GH1890 ax = Series(np.arange(12) + 1).plot(color='green') self._check_colors(ax.get_lines(), linecolors=['green']) def test_time_series_plot_color_kwargs(self): # #1890 ax = Series(np.arange(12) + 1, index=date_range( '1/1/2000', periods=12)).plot(color='green') self._check_colors(ax.get_lines(), linecolors=['green']) def test_time_series_plot_color_with_empty_kwargs(self): import matplotlib as mpl if self.mpl_ge_1_5_0: def_colors = self._maybe_unpack_cycler(mpl.rcParams) else: def_colors = mpl.rcParams['axes.color_cycle'] index = date_range('1/1/2000', periods=12) s = Series(np.arange(1, 13), index=index) ncolors = 3 for i in range(ncolors): ax = s.plot() self._check_colors(ax.get_lines(), linecolors=def_colors[:ncolors]) def test_xticklabels(self): # GH11529 s = Series(np.arange(10), index=['P%02d' % i for i in range(10)]) ax = s.plot(xticks=[0, 3, 5, 9]) exp = ['P%02d' % i for i in [0, 3, 5, 9]] self._check_text_labels(ax.get_xticklabels(), exp) def test_custom_business_day_freq(self): # GH7222 from pandas.tseries.offsets import CustomBusinessDay s = Series(range(100, 121), index=pd.bdate_range( start='2014-05-01', end='2014-06-01', freq=CustomBusinessDay(holidays=['2014-05-26']))) _check_plot_works(s.plot) if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
CiscoUcs/Ironic-UCS
refs/heads/master
build/lib.linux-x86_64-2.7/ironic/objects/port.py
7
# coding=utf-8 # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ironic.common import exception from ironic.common import utils from ironic.db import api as dbapi from ironic.objects import base from ironic.objects import utils as obj_utils class Port(base.IronicObject): # Version 1.0: Initial version # Version 1.1: Add get() and get_by_id() and get_by_address() and # make get_by_uuid() only work with a uuid # Version 1.2: Add create() and destroy() # Version 1.3: Add list() # Version 1.4: Add list_by_node_id() VERSION = '1.4' dbapi = dbapi.get_instance() fields = { 'id': int, 'uuid': obj_utils.str_or_none, 'node_id': obj_utils.int_or_none, 'address': obj_utils.str_or_none, 'extra': obj_utils.dict_or_none, } @staticmethod def _from_db_object(port, db_port): """Converts a database entity to a formal object.""" for field in port.fields: port[field] = db_port[field] port.obj_reset_changes() return port @staticmethod def _from_db_object_list(db_objects, cls, context): """Converts a list of database entities to a list of formal objects.""" return [Port._from_db_object(cls(context), obj) for obj in db_objects] @base.remotable_classmethod def get(cls, context, port_id): """Find a port based on its id or uuid and return a Port object. :param port_id: the id *or* uuid of a port. :returns: a :class:`Port` object. 
""" if utils.is_int_like(port_id): return cls.get_by_id(context, port_id) elif utils.is_uuid_like(port_id): return cls.get_by_uuid(context, port_id) elif utils.is_valid_mac(port_id): return cls.get_by_address(context, port_id) else: raise exception.InvalidIdentity(identity=port_id) @base.remotable_classmethod def get_by_id(cls, context, port_id): """Find a port based on its integer id and return a Port object. :param port_id: the id of a port. :returns: a :class:`Port` object. """ db_port = cls.dbapi.get_port_by_id(port_id) port = Port._from_db_object(cls(context), db_port) return port @base.remotable_classmethod def get_by_uuid(cls, context, uuid): """Find a port based on uuid and return a :class:`Port` object. :param uuid: the uuid of a port. :param context: Security context :returns: a :class:`Port` object. """ db_port = cls.dbapi.get_port_by_uuid(uuid) port = Port._from_db_object(cls(context), db_port) return port @base.remotable_classmethod def get_by_address(cls, context, address): """Find a port based on address and return a :class:`Port` object. :param address: the address of a port. :param context: Security context :returns: a :class:`Port` object. """ db_port = cls.dbapi.get_port_by_address(address) port = Port._from_db_object(cls(context), db_port) return port @base.remotable_classmethod def list(cls, context, limit=None, marker=None, sort_key=None, sort_dir=None): """Return a list of Port objects. :param context: Security context. :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :returns: a list of :class:`Port` object. 
""" db_ports = cls.dbapi.get_port_list(limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir) return Port._from_db_object_list(db_ports, cls, context) @base.remotable_classmethod def list_by_node_id(cls, context, node_id, limit=None, marker=None, sort_key=None, sort_dir=None): """Return a list of Port objects associated with a given node ID. :param context: Security context. :param node_id: the ID of the node. :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :returns: a list of :class:`Port` object. """ db_ports = cls.dbapi.get_ports_by_node_id(node_id, limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir) return Port._from_db_object_list(db_ports, cls, context) @base.remotable def create(self, context=None): """Create a Port record in the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Port(context) """ values = self.obj_get_changes() db_port = self.dbapi.create_port(values) self._from_db_object(self, db_port) @base.remotable def destroy(self, context=None): """Delete the Port from the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Port(context) """ self.dbapi.destroy_port(self.uuid) self.obj_reset_changes() @base.remotable def save(self, context=None): """Save updates to this Port. Updates will be made column by column based on the result of self.what_changed(). :param context: Security context. NOTE: This should only be used internally by the indirection_api. 
Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Port(context) """ updates = self.obj_get_changes() self.dbapi.update_port(self.uuid, updates) self.obj_reset_changes() @base.remotable def refresh(self, context=None): """Loads updates for this Port. Loads a port with the same uuid from the database and checks for updated attributes. Updates are applied from the loaded port column by column, if there are any updates. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Port(context) """ current = self.__class__.get_by_uuid(self._context, uuid=self.uuid) for field in self.fields: if (hasattr(self, base.get_attrname(field)) and self[field] != current[field]): self[field] = current[field]
makei/gnome15
refs/heads/master
src/plugins/volume/volume.py
8
# Gnome15 - Suite of tools for the Logitech G series keyboards and headsets
# Copyright (C) 2010 Brett Smith <tanktarta@blueyonder.co.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import gnome15.g15locale as g15locale
_ = g15locale.get_translation("volume", modfile = __file__).ugettext

import gnome15.g15screen as g15screen
import gnome15.util.g15scheduler as g15scheduler
import gnome15.util.g15uigconf as g15uigconf
import gnome15.util.g15gconf as g15gconf
import gnome15.util.g15icontools as g15icontools
import gnome15.g15theme as g15theme
import gnome15.g15driver as g15driver
import gnome15.g15devices as g15devices
import gnome15.g15actions as g15actions
import alsaaudio
import select
import os
import gtk
import logging
logger = logging.getLogger(__name__)
from threading import Thread

# Custom actions
VOLUME_UP = "volume-up"
VOLUME_DOWN = "volume-down"
MUTE = "mute"

# Register the action with all supported models
g15devices.g15_action_keys[VOLUME_UP] = g15actions.ActionBinding(VOLUME_UP, [ g15driver.G_KEY_VOL_UP ], g15driver.KEY_STATE_UP)
g15devices.g19_action_keys[VOLUME_UP] = g15actions.ActionBinding(VOLUME_UP, [ g15driver.G_KEY_VOL_UP ], g15driver.KEY_STATE_UP)
g15devices.g15_action_keys[VOLUME_DOWN] = g15actions.ActionBinding(VOLUME_DOWN, [ g15driver.G_KEY_VOL_DOWN ], g15driver.KEY_STATE_UP)
g15devices.g19_action_keys[VOLUME_DOWN] = g15actions.ActionBinding(VOLUME_DOWN, [ g15driver.G_KEY_VOL_DOWN ], g15driver.KEY_STATE_UP)
g15devices.g15_action_keys[MUTE] = g15actions.ActionBinding(MUTE, [ g15driver.G_KEY_MUTE ], g15driver.KEY_STATE_UP)
g15devices.g19_action_keys[MUTE] = g15actions.ActionBinding(MUTE, [ g15driver.G_KEY_MUTE ], g15driver.KEY_STATE_UP)

# Plugin details - All of these must be provided
id="volume"
name=_("Volume Monitor")
description=_("Uses the M-Key lights as a volume meter. If your model has \
a screen, a page will also popup showing the current volume. \
You may choose the mixer that is monitored in the preferences for this plugin.\n\n \
This plugin also registers some actions that may be assigned to macro keys. \
The actions volume-up, volume-down and mute all work directly on the mixer, \
so may be used control the master volume when full screen games are running too.")
author="Brett Smith <tanktarta@blueyonder.co.uk>"
copyright=_("Copyright (C)2010 Brett Smith")
site="http://www.russo79.com/gnome15"
has_preferences=True
default_enabled=True
unsupported_models = [ g15driver.MODEL_G930, g15driver.MODEL_G35 ]
actions={
    VOLUME_UP : "Increase the volume",
    VOLUME_DOWN : "Decrease the volume",
    MUTE : "Mute",
    }

'''
This plugin displays a high priority screen when the volume is changed for a
fixed number of seconds
'''

def create(gconf_key, gconf_client, screen):
    """Plugin entry point - build the plugin instance for a device screen."""
    return G15Volume(screen, gconf_client, gconf_key)

def show_preferences(parent, driver, gconf_client, gconf_key):
    """Show the preferences dialog, letting the user pick the ALSA soundcard
    and mixer that the plugin monitors."""

    def refresh_devices(widget):
        new_soundcard_name = soundcard_model[widget.get_active()][0]
        new_soundcard_index = alsa_soundcards.index(new_soundcard_name)
        # We temporarily block the handler for the mixer_combo 'changed'
        # signal, since we are going to change the combobox contents.
        mixer_combo.handler_block(changed_handler_id)
        mixer_model.clear()
        for mixer in alsaaudio.mixers(new_soundcard_index):
            mixer_model.append([mixer])
        # Now we can unblock the handler
        mixer_combo.handler_unblock(changed_handler_id)
        # And since the list of mixers has changed, we select the first one by default
        mixer_combo.set_active(0)

    widget_tree = gtk.Builder()
    widget_tree.add_from_file(os.path.join(os.path.dirname(__file__), "volume.ui"))
    dialog = widget_tree.get_object("VolumeDialog")
    soundcard_combo = widget_tree.get_object('SoundcardCombo')
    mixer_combo = widget_tree.get_object('MixerCombo')
    soundcard_model = widget_tree.get_object("SoundcardModel")
    mixer_model = widget_tree.get_object("MixerModel")

    alsa_soundcards = alsaaudio.cards()
    soundcard_name = g15gconf.get_string_or_default(gconf_client,
                                                    gconf_key + "/soundcard",
                                                    str(alsa_soundcards[0]))
    soundcard_index = alsa_soundcards.index(soundcard_name)
    soundcard_mixers = alsaaudio.mixers(soundcard_index)

    for card in alsa_soundcards:
        soundcard_model.append([card])
    for mixer in soundcard_mixers:
        mixer_model.append([mixer])

    g15uigconf.configure_combo_from_gconf(gconf_client, \
                                          gconf_key + "/soundcard", \
                                          "SoundcardCombo", \
                                          str(alsa_soundcards[0]), \
                                          widget_tree)
    changed_handler_id = g15uigconf.configure_combo_from_gconf(gconf_client, \
                                                               gconf_key + "/mixer", \
                                                               "MixerCombo", \
                                                               str(soundcard_mixers[0]), \
                                                               widget_tree)
    soundcard_combo.connect('changed', refresh_devices)

    dialog.set_transient_for(parent)
    dialog.run()
    dialog.hide()

class G15Volume():
    """Monitors an ALSA mixer, showing the current volume on the LCD page
    and on the M-Key lights when it changes."""

    def __init__(self, screen, gconf_client, gconf_key):
        self._screen = screen
        self._gconf_client = gconf_client
        self._gconf_key = gconf_key
        self._volume = 0.0
        self._volthread = None
        self._mute = False
        self._light_controls = None
        self._lights_timer = None
        self._reload_config_timer = None

    def activate(self):
        self._screen.key_handler.action_listeners.append(self)
        self._activated = True
        self._read_config()
        self._start_monitoring()
        self._notify_handler = self._gconf_client.notify_add(self._gconf_key, self._config_changed)

    def deactivate(self):
        self._screen.key_handler.action_listeners.remove(self)
        self._activated = False
        self._stop_monitoring()
        self._gconf_client.notify_remove(self._notify_handler)

    def destroy(self):
        pass

    def action_performed(self, binding):
        """Handle the volume-up / volume-down / mute macro key actions by
        operating directly on the configured mixer."""
        if binding.action in [ VOLUME_UP, VOLUME_DOWN, MUTE ]:
            vol_mixer = self._open_mixer()
            try:
                if binding.action == MUTE:
                    # Handle mute
                    mute = False
                    mutes = None
                    try:
                        mutes = vol_mixer.getmute()
                    except alsaaudio.ALSAAudioError as e:
                        logger.debug("Could not get mute channel. Trying PCM mixer", exc_info = e)
                        if vol_mixer is not None:
                            vol_mixer.close()
                        # Some pulse weirdness maybe?
                        # BUG FIX: _open_mixer() takes only an optional mixer
                        # name; the old call passed a second argument read from
                        # a non-existent `current_card_index` attribute and
                        # would always have raised before reaching the mixer.
                        vol_mixer = self._open_mixer("PCM")
                        try:
                            mutes = vol_mixer.getmute()
                        except alsaaudio.ALSAAudioError as e:
                            logger.warning("No mute switch found", exc_info = e)
                    if mutes != None:
                        for ch_mute in mutes:
                            if ch_mute:
                                mute = True
                        # Toggle: 1 mutes, 0 un-mutes
                        vol_mixer.setmute(1 if not mute else 0)
                else:
                    # Average the per-channel volumes, then step by 10%
                    volumes = vol_mixer.getvolume()
                    total = 0
                    for vol in volumes:
                        total += vol
                    volume = total / len(volumes)
                    if binding.action == VOLUME_UP and volume < 100:
                        volume += 10
                        vol_mixer.setvolume(min(volume, 100))
                    elif binding.action == VOLUME_DOWN and volume > 0:
                        volume -= 10
                        vol_mixer.setvolume(max(volume, 0))
            finally:
                if vol_mixer is not None:
                    vol_mixer.close()

    '''
    Functions specific to plugin
    '''

    def _start_monitoring(self):
        self._volthread = VolumeThread(self)
        self._volthread.start()

    def _config_changed(self, client, connection_id, entry, args):
        '''
        If the user changes the soundcard on the preferences dialog this method
        would be called two times.
        A first time for the soundcard change, and a second time because the
        first mixer of the newly selected soundcard is automatically selected.
        The volume monitoring would then be restarted twice, which makes no
        sense.
        Instead of restarting the monitoring as soon as this method is called,
        we put it as a task on a queue for 1 second.
        If during that time, any other change happens to the configuration,
        the previous restart request is cancelled, and another one takes it's
        place.
        This way, the monitoring is only restarted once when the user selects
        another sound card.
        '''
        if self._reload_config_timer is not None:
            if not self._reload_config_timer.is_complete():
                self._reload_config_timer.cancel()
                self._reload_config_timer = None

        self._reload_config_timer = g15scheduler.queue('VolumeMonitorQueue',
                                                       'RestartVolumeMonitoring',
                                                       1.0,
                                                       self._restart_monitoring)

    def _restart_monitoring(self):
        self._stop_monitoring()
        self._read_config()
        self._start_monitoring()

    def _read_config(self):
        """Read the configured soundcard / mixer from gconf, falling back to
        the first card and its first mixer when unset or stale."""
        self.soundcard_name = g15gconf.get_string_or_default(self._gconf_client, \
                                                             self._gconf_key + "/soundcard", \
                                                             str(alsaaudio.cards()[0]))
        self.soundcard_index = alsaaudio.cards().index(self.soundcard_name)
        self.mixer_name = g15gconf.get_string_or_default(self._gconf_client, \
                                                         self._gconf_key + "/mixer", \
                                                         str(alsaaudio.mixers(self.soundcard_index)[0]))
        # The stored mixer may not exist on the selected card; fall back to
        # the card's first mixer and persist that choice.
        if not self.mixer_name in alsaaudio.mixers(self.soundcard_index):
            self.mixer_name = str(alsaaudio.mixers(self.soundcard_index)[0])
            self._gconf_client.set_string(self._gconf_key + "/mixer", self.mixer_name)

    def _stop_monitoring(self):
        if self._volthread != None:
            self._volthread._stop_monitoring()
            self._volthread.join(1.0)

    def _get_theme_properties(self):
        """Build the dictionary of properties the theme SVG/templates use."""
        properties = {}
        icon = "audio-volume-muted"
        if not self._mute:
            if self._volume < 34:
                icon = "audio-volume-low"
            elif self._volume < 67:
                icon = "audio-volume-medium"
            else:
                icon = "audio-volume-high"
        else:
            properties["muted"] = True
        icon_path = g15icontools.get_icon_path(icon, self._screen.driver.get_size()[0])
        properties["state"] = icon
        properties["icon"] = icon_path
        properties["vol_pc"] = self._volume
        # barN flags light up one segment of the volume bar per 10%
        for i in range(0, int( self._volume / 10 ) + 1, 1):
            properties["bar" + str(i)] = True
        return properties

    def _release_lights(self):
        if self._light_controls is not None:
            self._screen.driver.release_control(self._light_controls)
            self._light_controls = None

    def _open_mixer(self, mixer_name = None):
        """Open and return an ALSA mixer on the configured card, defaulting
        to the configured mixer name (or "Master" if none is set).
        Caller is responsible for closing the returned mixer."""
        mixer_name = self.mixer_name if mixer_name is None else mixer_name
        if not mixer_name or mixer_name == "":
            mixer_name = "Master"
        logger.info("Opening soundcard %s mixer %s", self.soundcard_name, mixer_name)
        vol_mixer = alsaaudio.Mixer(mixer_name, cardindex=self.soundcard_index)
        return vol_mixer

    def _popup(self):
        """Read the current volume/mute state and show it on the M-Key lights
        and (on models with a screen) on a temporary high priority page."""
        if not self._activated:
            logger.warning("Cannot popup volume when it is deactivated. This suggests the volume thread has not died.")
            return

        if not self._light_controls:
            self._light_controls = self._screen.driver.acquire_control_with_hint(g15driver.HINT_MKEYS)
        if self._lights_timer is not None:
            self._lights_timer.cancel()
        if self._light_controls is not None:
            self._lights_timer = g15scheduler.schedule("ReleaseMKeyLights", 3.0, self._release_lights)

        # `id` here is the module level plugin id ("volume")
        page = self._screen.get_page(id)
        if page == None:
            if self._screen.driver.get_bpp() != 0:
                page = g15theme.G15Page(id, self._screen, priority=g15screen.PRI_HIGH, title="Volume", theme = g15theme.G15Theme(self), \
                                        theme_properties_callback = self._get_theme_properties,
                                        originating_plugin = self)
                self._screen.delete_after(3.0, page)
                self._screen.add_page(page)
        else:
            self._screen.raise_page(page)
            self._screen.delete_after(3.0, page)

        vol_mixer = self._open_mixer()
        mute_mixer = None

        try:
            # Handle mute
            mute = False
            mutes = None
            try:
                mutes = vol_mixer.getmute()
            except alsaaudio.ALSAAudioError as e:
                logger.debug("Could not get mute channel. Trying PCM", exc_info = e)
                # Some pulse weirdness maybe?
                mute_mixer = alsaaudio.Mixer("PCM", cardindex=self.soundcard_index)
                try:
                    mutes = mute_mixer.getmute()
                except alsaaudio.ALSAAudioError as e:
                    logger.warning("No mute switch found", exc_info = e)
            if mutes != None:
                for ch_mute in mutes:
                    if ch_mute:
                        mute = True

            # TODO better way than averaging
            volumes = vol_mixer.getvolume()
        finally:
            vol_mixer.close()
            if mute_mixer:
                mute_mixer.close()

        total = 0
        for vol in volumes:
            total += vol
        volume = total / len(volumes)

        self._volume = volume

        if self._light_controls is not None:
            if self._volume > 90:
                self._light_controls.set_value(g15driver.MKEY_LIGHT_MR | g15driver.MKEY_LIGHT_1 | g15driver.MKEY_LIGHT_2 | g15driver.MKEY_LIGHT_3)
            elif self._volume > 75:
                self._light_controls.set_value(g15driver.MKEY_LIGHT_1 | g15driver.MKEY_LIGHT_2 | g15driver.MKEY_LIGHT_3)
            elif self._volume > 50:
                self._light_controls.set_value(g15driver.MKEY_LIGHT_1 | g15driver.MKEY_LIGHT_2)
            elif self._volume > 25:
                self._light_controls.set_value(g15driver.MKEY_LIGHT_1)
            else:
                self._light_controls.set_value(0)

        self._mute = mute

        self._screen.redraw(page)

class VolumeThread(Thread):
    """Background thread that polls the mixer's descriptors and schedules a
    popup whenever the mixer state changes."""

    def __init__(self, volume):
        Thread.__init__(self)
        self.name = "VolumeThread"
        self.setDaemon(True)
        self._volume = volume
        logger.info("Opening soundcard %s mixer %s", volume.soundcard_name, volume.mixer_name)
        self._mixer = alsaaudio.Mixer(volume.mixer_name, cardindex=volume.soundcard_index)
        self._poll_desc = self._mixer.polldescriptors()
        self._poll = select.poll()
        self._fd = self._poll_desc[0][0]
        self._event_mask = self._poll_desc[0][1]
        self._open = os.fdopen(self._fd)
        self._poll.register(self._open, select.POLLIN)
        self._stop = False

    def _stop_monitoring(self):
        self._stop = True
        self._open.close()
        self._mixer.close()

    def run(self):
        try:
            while not self._stop:
                if self._poll.poll(5):
                    if self._stop:
                        break
                    g15scheduler.schedule("popupVolume", 0, self._volume._popup)
                    # An empty read means the descriptor was closed
                    if not self._open.read():
                        break
        finally:
            try:
                self._poll.unregister(self._open)
            except Exception as e:
                logger.debug("Error when unregistering", exc_info = e)
            self._open.close()
lukeiwanski/tensorflow-opencl
refs/heads/master
tensorflow/contrib/sparsemax/python/ops/sparsemax.py
104
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sparsemax op."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import resource_loader

__all__ = ["sparsemax"]


def sparsemax(logits, name=None):
  """Computes sparsemax activations [1].

  For each batch `i` and class `j` we have
    sparsemax[i, j] = max(logits[i, j] - tau(logits[i, :]), 0)

  [1]: https://arxiv.org/abs/1602.02068

  Args:
    logits: A `Tensor`. Must be one of the following types: `half`, `float32`,
      `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `logits`.
  """

  with ops.name_scope(name, "sparsemax", [logits]) as name:
    logits = ops.convert_to_tensor(logits, name="logits")
    logits_shape = array_ops.shape(logits)
    num_rows = logits_shape[0]
    num_cols = logits_shape[1]

    # Shift each row by its mean; sparsemax is invariant to constant shifts.
    z = logits - math_ops.reduce_mean(logits, axis=1)[:, array_ops.newaxis]

    # Sort each row in descending order.
    z_desc, _ = nn.top_k(z, k=num_cols)

    # Evaluate the support condition 1 + k * z_(k) > sum_{j<=k} z_(j) for
    # every candidate support size k.
    partial_sums = math_ops.cumsum(z_desc, axis=1)
    ks = math_ops.range(
        1, math_ops.cast(num_cols, logits.dtype) + 1, dtype=logits.dtype)
    in_support = 1 + ks * z_desc > partial_sums

    # The support mask always has the form [1,1,...,1,0,...,0], so the
    # (index + 1) of the last `1` equals the number of `1`s.
    support_size = math_ops.reduce_sum(
        math_ops.cast(in_support, dtypes.int32), axis=1)

    # tau(z) = (sum of the support_size largest entries - 1) / support_size
    gather_idx = array_ops.stack(
        [math_ops.range(0, num_rows), support_size - 1], axis=1)
    tau_sum = array_ops.gather_nd(partial_sums, gather_idx)
    tau = (tau_sum - 1) / math_ops.cast(support_size, logits.dtype)

    # Project: clip the shifted logits at tau from above, at zero from below.
    return math_ops.maximum(
        math_ops.cast(0, logits.dtype), z - tau[:, array_ops.newaxis])
GoogleCloudPlatform/datacatalog-connectors-hive
refs/heads/master
google-datacatalog-hive-connector/src/google/datacatalog_connectors/hive/__init__.py
9
#!/usr/bin/python # # Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .datacatalog_cli import main __all__ = ['main']
lvgilmore/Luke
refs/heads/master
LukeClient/Disks.py
1
#! /usr/bin/python

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from LukeClient.utils.Utils import produce_command


class Disks(object):
    """Enumerates the host's block devices and collects size/vendor
    information for each one into `disksObject` (keyed by device name)."""

    def __init__(self):
        # `lsblk` output filtered to device rows; one name (e.g. "sda") per line.
        self.allDisks = produce_command("lsblk | grep ^[a-z] | awk '{print $1}'")
        self.disksObject = {}
        self.get_all_disks()

    def get_all_disks(self):
        """Build a Disk entry for every discovered device.

        BUG FIX: the size/vendor commands previously hard-coded /dev/sda: and
        /sys/block/sd?, so every device was reported with sda's size and an
        ambiguous vendor; they now query the device being iterated.
        """
        disksList = {}
        for disk in self.allDisks.split():
            size = produce_command(
                "sudo fdisk -l | grep /dev/%s: | awk '{print $3$4}'" % disk)
            vendor = produce_command(
                "cat /sys/block/%s/device/vendor" % disk)
            disksList[disk] = Disk(size, vendor)
        self.init_disks_object(disksList)

    def init_disks_object(self, disksList):
        # Flatten the Disk wrappers into plain dicts for external consumers.
        for k, v in disksList.items():
            self.disksObject.update({k: v.diskObject})


class Disk(object):
    """Value object holding a single device's size and vendor strings."""

    def __init__(self, size, vendor):
        self.diskObject = {'Size': size, 'Vendor': vendor}
dajusc/trimesh
refs/heads/master
tests/test_ray.py
1
try:
    from . import generic as g
except BaseException:
    import generic as g


class RayTests(g.unittest.TestCase):

    def test_rays(self):
        """Run the stored ray queries on every fixture mesh and check the
        hit/miss pattern agrees for meshes grouped by file name."""
        meshes = [g.get_mesh(**k)
                  for k in g.data['ray_data']['load_kwargs']]
        rays = g.data['ray_data']['rays']
        names = [m.metadata['file_name'] for m in meshes]

        hit_id = []
        hit_loc = []
        hit_any = []

        for m in meshes:
            name = m.metadata['file_name']
            hit_any.append(m.ray.intersects_any(**rays[name]))
            hit_loc.append(m.ray.intersects_location(**rays[name])[0])
            hit_id.append(m.ray.intersects_id(**rays[name]))

        # BUG FIX: `np.int` was a deprecated alias for the builtin `int` and
        # was removed in NumPy 1.24; use the builtin directly (identical
        # behavior on all NumPy versions).
        hit_any = g.np.array(hit_any, dtype=int)
        for i in g.trimesh.grouping.group(
                g.np.unique(names, return_inverse=True)[1]):
            broken = hit_any[i].astype(int).ptp(axis=0).sum()
            assert broken == 0

    def test_rps(self):
        """Benchmark rays/second for both ray backends (smoke test only)."""
        for use_embree in [True, False]:
            dimension = (10000, 3)
            sphere = g.get_mesh('unit_sphere.STL',
                                use_embree=use_embree)

            ray_origins = g.np.random.random(dimension)
            ray_directions = g.np.tile([0, 0, 1], (dimension[0], 1))
            ray_origins[:, 2] = -5

            # force ray object to allocate tree before timing it
            # tree = sphere.ray.tree
            tic = [g.time.time()]
            a = sphere.ray.intersects_id(
                ray_origins, ray_directions)
            tic.append(g.time.time())
            b = sphere.ray.intersects_location(
                ray_origins, ray_directions)
            tic.append(g.time.time())

            # make sure ray functions always return numpy arrays
            assert all(len(i.shape) >= 0 for i in a)
            assert all(len(i.shape) >= 0 for i in b)

            rps = dimension[0] / g.np.diff(tic)

            g.log.info('Measured %s rays/second with embree %d',
                       str(rps),
                       use_embree)

    def test_empty(self):
        """
        Test queries with no hits
        """
        for use_embree in [True, False]:
            dimension = (100, 3)
            sphere = g.get_mesh('unit_sphere.STL',
                                use_embree=use_embree)
            # should never hit the sphere
            ray_origins = g.np.random.random(dimension)
            ray_directions = g.np.tile([0, 1, 0], (dimension[0], 1))
            ray_origins[:, 2] = -5

            # make sure ray functions always return numpy arrays
            # these functions return multiple results all of which
            # should always be a numpy array
            assert all(len(i.shape) >= 0 for i in
                       sphere.ray.intersects_id(
                           ray_origins, ray_directions))
            assert all(len(i.shape) >= 0 for i in
                       sphere.ray.intersects_location(
                           ray_origins, ray_directions))

    def test_contains(self):
        """Check point containment against scaled copies of a unit cube."""
        scale = 1.5
        for use_embree in [True, False]:
            mesh = g.get_mesh('unit_cube.STL', use_embree=use_embree)
            g.log.info('Contains test ray engine: ' + str(mesh.ray.__class__))

            test_on = mesh.ray.contains_points(mesh.vertices)  # NOQA
            test_in = mesh.ray.contains_points(mesh.vertices * (1.0 / scale))
            assert test_in.all()

            test_out = mesh.ray.contains_points(mesh.vertices * scale)
            assert not test_out.any()

            points_way_out = (
                g.np.random.random(
                    (30, 3)) * 100) + 1.0 + mesh.bounds[1]
            test_way_out = mesh.ray.contains_points(points_way_out)
            assert not test_way_out.any()

            test_centroid = mesh.ray.contains_points([mesh.center_mass])
            assert test_centroid.all()

    def test_on_vertex(self):
        """Rays from the origin through each vertex must hit exactly once."""
        for use_embree in [True, False]:
            # NOTE(review): the loop variable is unused here; the original
            # always constructed the box with use_embree=False - confirm
            # whether both backends were meant to be exercised.
            m = g.trimesh.primitives.Box(use_embree=False)

            origins = g.np.zeros_like(m.vertices)
            vectors = m.vertices.copy()

            assert m.ray.intersects_any(ray_origins=origins,
                                        ray_directions=vectors).all()

            (locations,
             index_ray,
             index_tri) = m.ray.intersects_location(ray_origins=origins,
                                                    ray_directions=vectors)

            hit_count = g.np.bincount(index_ray,
                                      minlength=len(origins))

            assert (hit_count == 1).all()

    def test_on_edge(self):
        """Points on or near faces/edges of a non-watertight mesh."""
        for use_embree in [True, False]:
            m = g.get_mesh('7_8ths_cube.stl')

            points = [[4.5, 0, -23], [4.5, 0, -2], [0, 0, -1e-6], [0, 0, -1]]
            truth = [False, True, True, True]
            result = g.trimesh.ray.ray_util.contains_points(m.ray, points)

            assert (result == truth).all()

    def test_multiple_hits(self):
        """
        """
        # Set camera focal length (in pixels)
        f = g.np.array([1000., 1000.])
        h, w = 256, 256

        # Set up a list of ray directions - one for each pixel in our (256,
        # 256) output image.
        ray_directions = g.trimesh.util.grid_arange(
            [[-h / 2, -w / 2],
             [h / 2, w / 2]],
            step=2.0)
        ray_directions = g.np.column_stack(
            (ray_directions,
             g.np.ones(len(ray_directions)) * f[0]))

        # Initialize the camera origin to be somewhere behind the cube.
        cam_t = g.np.array([0, 0, -15.])
        # Duplicate to ensure we have an camera_origin per ray direction
        ray_origins = g.np.tile(cam_t, (ray_directions.shape[0], 1))

        for use_embree in [True, False]:
            # Generate a 1 x 1 x 1 cube using the trimesh box primitive
            cube_mesh = g.trimesh.primitives.Box(extents=[2, 2, 2],
                                                 use_embree=use_embree)

            # Perform 256 * 256 raycasts, one for each pixel on the image
            # plane. We only want the 'first' hit.
            index_triangles, index_ray = cube_mesh.ray.intersects_id(
                ray_origins=ray_origins,
                ray_directions=ray_directions,
                multiple_hits=False)
            assert len(g.np.unique(index_triangles)) == 2

            index_triangles, index_ray = cube_mesh.ray.intersects_id(
                ray_origins=ray_origins,
                ray_directions=ray_directions,
                multiple_hits=True)
            assert len(g.np.unique(index_triangles)) > 2

    def test_contain_single(self):
        """Containment queries on a non-watertight mesh should not crash."""
        # not watertight
        mesh = g.get_mesh("teapot.stl", use_embree=False)

        # sample a grid of points (n,3)
        points = mesh.bounding_box.sample_grid(step=2.0)
        # to a contains check on every point
        contained = mesh.ray.contains_points(points)

        assert len(points) == len(contained)

        # not contained and should surface a bug
        for point in mesh.bounding_box.vertices:
            mesh.ray.contains_points([point])

    def test_box(self):
        """
        Run box- ray intersection along Z and
        make sure XY match ray origin XY.
        """
        for kwargs in [{'use_embree': True},
                       {'use_embree': False}]:
            mesh = g.get_mesh('unit_cube.STL', **kwargs)
            # grid is across meshes XY profile
            origins = g.trimesh.util.grid_linspace(mesh.bounds[:, :2] +
                                                   g.np.reshape(
                                                       [-.02, .02], (-1, 1)),
                                                   100)
            origins = g.np.column_stack((
                origins,
                g.np.ones(len(origins)) * -100))
            # all vectors are along Z axis
            vectors = g.np.ones((len(origins), 3)) * [0, 0, 1.0]

            # (n,3) float intersection position in space
            # (n,) int, index of original ray
            # (m,) int, index of mesh.faces
            pos, ray, tri = mesh.ray.intersects_location(
                ray_origins=origins,
                ray_directions=vectors)

            for p, r in zip(pos, ray):
                # intersect location XY should match ray origin XY
                assert g.np.allclose(p[:2], origins[r][:2])
                # the Z of the hit should be on the cube's
                # top or bottom face
                assert g.np.isclose(p[2], mesh.bounds[:, 2]).any()

    def test_broken(self):
        """
        Test a mesh with badly defined face normals
        """

        ray_origins = g.np.array([[0.12801793, 24.5030052, -5.],
                                  [0.12801793, 24.5030052, -5.]])
        ray_directions = g.np.array([[-0.13590759, -0.98042506, 0.],
                                     [0.13590759, 0.98042506, -0.]])

        for kwargs in [{'use_embree': True},
                       {'use_embree': False}]:
            mesh = g.get_mesh('broken.STL', **kwargs)

            locations, index_ray, index_tri = mesh.ray.intersects_location(
                ray_origins=ray_origins, ray_directions=ray_directions)

            # should be same number of location hits
            assert len(locations) == len(ray_origins)


if __name__ == '__main__':
    g.trimesh.util.attach_to_log()
    g.unittest.main()
shft117/SteckerApp
refs/heads/master
erpnext/hr/report/monthly_attendance_sheet/monthly_attendance_sheet.py
17
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe from frappe.utils import cstr, cint from frappe import msgprint, _ from calendar import monthrange def execute(filters=None): if not filters: filters = {} conditions, filters = get_conditions(filters) columns = get_columns(filters) att_map = get_attendance_list(conditions, filters) emp_map = get_employee_details() data = [] for emp in sorted(att_map): emp_det = emp_map.get(emp) if not emp_det: continue row = [emp, emp_det.employee_name, emp_det.branch, emp_det.department, emp_det.designation, emp_det.company] total_p = total_a = 0.0 for day in range(filters["total_days_in_month"]): status = att_map.get(emp).get(day + 1, "Absent") status_map = {"Present": "P", "Absent": "A", "Half Day": "HD"} row.append(status_map[status]) if status == "Present": total_p += 1 elif status == "Absent": total_a += 1 elif status == "Half Day": total_p += 0.5 total_a += 0.5 row += [total_p, total_a] data.append(row) return columns, data def get_columns(filters): columns = [ _("Employee") + ":Link/Employee:120", _("Employee Name") + "::140", _("Branch")+ ":Link/Branch:120", _("Department") + ":Link/Department:120", _("Designation") + ":Link/Designation:120", _("Company") + ":Link/Company:120" ] for day in range(filters["total_days_in_month"]): columns.append(cstr(day+1) +"::20") columns += [_("Total Present") + ":Float:80", _("Total Absent") + ":Float:80"] return columns def get_attendance_list(conditions, filters): attendance_list = frappe.db.sql("""select employee, day(att_date) as day_of_month, status from tabAttendance where docstatus = 1 %s order by employee, att_date""" % conditions, filters, as_dict=1) att_map = {} for d in attendance_list: att_map.setdefault(d.employee, frappe._dict()).setdefault(d.day_of_month, "") att_map[d.employee][d.day_of_month] = d.status return att_map def 
get_conditions(filters): if not (filters.get("month") and filters.get("fiscal_year")): msgprint(_("Please select month and year"), raise_exception=1) filters["month"] = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"].index(filters.month) + 1 year_start_date, year_end_date = frappe.db.get_value("Fiscal Year", filters.fiscal_year, ["year_start_date", "year_end_date"]) if filters.month >= year_start_date.strftime("%m"): year = year_start_date.strftime("%Y") else: year = year_end_date.strftime("%Y") filters["total_days_in_month"] = monthrange(cint(year), filters.month)[1] conditions = " and month(att_date) = %(month)s and fiscal_year = %(fiscal_year)s" if filters.get("company"): conditions += " and company = %(company)s" if filters.get("employee"): conditions += " and employee = %(employee)s" return conditions, filters def get_employee_details(): emp_map = frappe._dict() for d in frappe.db.sql("""select name, employee_name, designation, department, branch, company from tabEmployee where docstatus < 2 and status = 'Active'""", as_dict=1): emp_map.setdefault(d.name, d) return emp_map
ABaldwinHunter/django-clone
refs/heads/master
django/template/backends/django.py
28
# Since this package contains a "django" module, this is required on Python 2.
from __future__ import absolute_import

import sys
from importlib import import_module
from pkgutil import walk_packages

from django.apps import apps
from django.conf import settings
from django.template import TemplateDoesNotExist
from django.template.context import make_context
from django.template.engine import Engine
from django.template.library import InvalidTemplateLibrary
from django.utils import six

from .base import BaseEngine


class DjangoTemplates(BaseEngine):
    # Template backend wrapping Django's own template Engine.

    # Subdirectory of each app searched for templates when APP_DIRS is True.
    app_dirname = 'templates'

    def __init__(self, params):
        # Copy before mutating: `params`/`OPTIONS` come from settings and
        # must not be modified in place.
        params = params.copy()
        options = params.pop('OPTIONS').copy()
        options.setdefault('autoescape', True)
        options.setdefault('debug', settings.DEBUG)
        options.setdefault('file_charset', settings.FILE_CHARSET)
        libraries = options.get('libraries', {})
        options['libraries'] = self.get_templatetag_libraries(libraries)
        super(DjangoTemplates, self).__init__(params)
        self.engine = Engine(self.dirs, self.app_dirs, **options)

    def from_string(self, template_code):
        return Template(self.engine.from_string(template_code), self)

    def get_template(self, template_name):
        try:
            return Template(self.engine.get_template(template_name), self)
        except TemplateDoesNotExist as exc:
            # Re-raise with backend attached so the debug page can show
            # which engines were tried.
            reraise(exc, self)

    def get_templatetag_libraries(self, custom_libraries):
        """
        Return a collation of template tag libraries from installed
        applications and the supplied custom_libraries argument.
        """
        libraries = get_installed_libraries()
        libraries.update(custom_libraries)
        return libraries


class Template(object):
    # Adapts a django.template.Template to the multi-backend template API.

    def __init__(self, template, backend):
        self.template = template
        self.backend = backend

    @property
    def origin(self):
        return self.template.origin

    def render(self, context=None, request=None):
        context = make_context(context, request,
                               autoescape=self.backend.engine.autoescape)
        try:
            return self.template.render(context)
        except TemplateDoesNotExist as exc:
            reraise(exc, self.backend)


def reraise(exc, backend):
    """
    Reraise TemplateDoesNotExist while maintaining template debug information.
    """
    new = exc.__class__(*exc.args, tried=exc.tried, backend=backend)
    if hasattr(exc, 'template_debug'):
        new.template_debug = exc.template_debug
    # six.reraise keeps the original traceback on Python 2 and 3.
    six.reraise(exc.__class__, new, sys.exc_info()[2])


def get_installed_libraries():
    """
    Return the built-in template tag libraries and those from installed
    applications. Libraries are stored in a dictionary where keys are the
    individual module names, not the full module paths. Example:
    django.templatetags.i18n is stored as i18n.
    """
    libraries = {}
    candidates = ['django.templatetags']
    candidates.extend(
        '%s.templatetags' % app_config.name
        for app_config in apps.get_app_configs())

    for candidate in candidates:
        try:
            pkg = import_module(candidate)
        except ImportError:
            # No templatetags package defined. This is safe to ignore.
            continue

        if hasattr(pkg, '__path__'):
            for name in get_package_libraries(pkg):
                # Strip the "<candidate>." prefix to keep only the short name.
                libraries[name[len(candidate) + 1:]] = name

    return libraries


def get_package_libraries(pkg):
    """
    Recursively yield template tag libraries defined in submodules of a
    package.
    """
    for entry in walk_packages(pkg.__path__, pkg.__name__ + '.'):
        try:
            module = import_module(entry[1])
        except ImportError as e:
            raise InvalidTemplateLibrary(
                "Invalid template library specified. ImportError raised when "
                "trying to load '%s': %s" % (entry[1], e)
            )

        if hasattr(module, 'register'):
            yield entry[1]
hellhovnd/django
refs/heads/master
django/contrib/contenttypes/generic.py
3
""" Classes allowing "generic" relations through ContentType and object-id fields. """ from __future__ import unicode_literals from collections import defaultdict from functools import partial from django.core.exceptions import ObjectDoesNotExist from django.db import connection from django.db import models, router, DEFAULT_DB_ALIAS from django.db.models import signals from django.db.models.fields.related import ForeignObject, ForeignObjectRel from django.db.models.related import PathInfo from django.db.models.sql.where import Constraint from django.forms import ModelForm from django.forms.models import BaseModelFormSet, modelformset_factory, save_instance from django.contrib.admin.options import InlineModelAdmin, flatten_fieldsets from django.contrib.contenttypes.models import ContentType from django.utils import six from django.utils.deprecation import RenameMethodsBase from django.utils.encoding import smart_text class RenameGenericForeignKeyMethods(RenameMethodsBase): renamed_methods = ( ('get_prefetch_query_set', 'get_prefetch_queryset', PendingDeprecationWarning), ) class GenericForeignKey(six.with_metaclass(RenameGenericForeignKeyMethods)): """ Provides a generic relation to any object through content-type/object-id fields. """ def __init__(self, ct_field="content_type", fk_field="object_id"): self.ct_field = ct_field self.fk_field = fk_field def contribute_to_class(self, cls, name): self.name = name self.model = cls self.cache_attr = "_%s_cache" % name cls._meta.add_virtual_field(self) # For some reason I don't totally understand, using weakrefs here doesn't work. signals.pre_init.connect(self.instance_pre_init, sender=cls, weak=False) # Connect myself as the descriptor for this field setattr(cls, name, self) def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs): """ Handles initializing an object with the generic FK instead of content-type/object-id fields. 
""" if self.name in kwargs: value = kwargs.pop(self.name) kwargs[self.ct_field] = self.get_content_type(obj=value) kwargs[self.fk_field] = value._get_pk_val() def get_content_type(self, obj=None, id=None, using=None): if obj is not None: return ContentType.objects.db_manager(obj._state.db).get_for_model(obj) elif id: return ContentType.objects.db_manager(using).get_for_id(id) else: # This should never happen. I love comments like this, don't you? raise Exception("Impossible arguments to GFK.get_content_type!") def get_prefetch_queryset(self, instances): # For efficiency, group the instances by content type and then do one # query per model fk_dict = defaultdict(set) # We need one instance for each group in order to get the right db: instance_dict = {} ct_attname = self.model._meta.get_field(self.ct_field).get_attname() for instance in instances: # We avoid looking for values if either ct_id or fkey value is None ct_id = getattr(instance, ct_attname) if ct_id is not None: fk_val = getattr(instance, self.fk_field) if fk_val is not None: fk_dict[ct_id].add(fk_val) instance_dict[ct_id] = instance ret_val = [] for ct_id, fkeys in fk_dict.items(): instance = instance_dict[ct_id] ct = self.get_content_type(id=ct_id, using=instance._state.db) ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys)) # For doing the join in Python, we have to match both the FK val and the # content type, so we use a callable that returns a (fk, class) pair. 
def gfk_key(obj): ct_id = getattr(obj, ct_attname) if ct_id is None: return None else: model = self.get_content_type(id=ct_id, using=obj._state.db).model_class() return (model._meta.pk.get_prep_value(getattr(obj, self.fk_field)), model) return (ret_val, lambda obj: (obj._get_pk_val(), obj.__class__), gfk_key, True, self.cache_attr) def is_cached(self, instance): return hasattr(instance, self.cache_attr) def __get__(self, instance, instance_type=None): if instance is None: return self try: return getattr(instance, self.cache_attr) except AttributeError: rel_obj = None # Make sure to use ContentType.objects.get_for_id() to ensure that # lookups are cached (see ticket #5570). This takes more code than # the naive ``getattr(instance, self.ct_field)``, but has better # performance when dealing with GFKs in loops and such. f = self.model._meta.get_field(self.ct_field) ct_id = getattr(instance, f.get_attname(), None) if ct_id: ct = self.get_content_type(id=ct_id, using=instance._state.db) try: rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field)) except ObjectDoesNotExist: pass setattr(instance, self.cache_attr, rel_obj) return rel_obj def __set__(self, instance, value): if instance is None: raise AttributeError("%s must be accessed via instance" % self.related.opts.object_name) ct = None fk = None if value is not None: ct = self.get_content_type(obj=value) fk = value._get_pk_val() setattr(instance, self.ct_field, ct) setattr(instance, self.fk_field, fk) setattr(instance, self.cache_attr, value) class GenericRelation(ForeignObject): """Provides an accessor to generic related objects (e.g. 
comments)""" def __init__(self, to, **kwargs): kwargs['verbose_name'] = kwargs.get('verbose_name', None) kwargs['rel'] = GenericRel( self, to, related_name=kwargs.pop('related_name', None), limit_choices_to=kwargs.pop('limit_choices_to', None),) # Override content-type/object-id field names on the related class self.object_id_field_name = kwargs.pop("object_id_field", "object_id") self.content_type_field_name = kwargs.pop("content_type_field", "content_type") kwargs['blank'] = True kwargs['editable'] = False kwargs['serialize'] = False # This construct is somewhat of an abuse of ForeignObject. This field # represents a relation from pk to object_id field. But, this relation # isn't direct, the join is generated reverse along foreign key. So, # the from_field is object_id field, to_field is pk because of the # reverse join. super(GenericRelation, self).__init__( to, to_fields=[], from_fields=[self.object_id_field_name], **kwargs) def resolve_related_fields(self): self.to_fields = [self.model._meta.pk.name] return [(self.rel.to._meta.get_field_by_name(self.object_id_field_name)[0], self.model._meta.pk)] def get_reverse_path_info(self): opts = self.rel.to._meta target = opts.get_field_by_name(self.object_id_field_name)[0] return [PathInfo(self.model._meta, opts, (target,), self.rel, True, False)] def get_choices_default(self): return super(GenericRelation, self).get_choices(include_blank=False) def value_to_string(self, obj): qs = getattr(obj, self.name).all() return smart_text([instance._get_pk_val() for instance in qs]) def get_joining_columns(self, reverse_join=False): if not reverse_join: # This error message is meant for the user, and from user # perspective this is a reverse join along the GenericRelation. 
raise ValueError('Joining in reverse direction not allowed.') return super(GenericRelation, self).get_joining_columns(reverse_join) def contribute_to_class(self, cls, name): super(GenericRelation, self).contribute_to_class(cls, name, virtual_only=True) # Save a reference to which model this class is on for future use self.model = cls # Add the descriptor for the relation setattr(cls, self.name, ReverseGenericRelatedObjectsDescriptor(self)) def contribute_to_related_class(self, cls, related): pass def set_attributes_from_rel(self): pass def get_internal_type(self): return "ManyToManyField" def get_content_type(self): """ Returns the content type associated with this field's model. """ return ContentType.objects.get_for_model(self.model) def get_extra_restriction(self, where_class, alias, remote_alias): field = self.rel.to._meta.get_field_by_name(self.content_type_field_name)[0] contenttype_pk = self.get_content_type().pk cond = where_class() cond.add((Constraint(remote_alias, field.column, field), 'exact', contenttype_pk), 'AND') return cond def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS): """ Return all objects related to ``objs`` via this ``GenericRelation``. """ return self.rel.to._base_manager.db_manager(using).filter(**{ "%s__pk" % self.content_type_field_name: ContentType.objects.db_manager(using).get_for_model(self.model).pk, "%s__in" % self.object_id_field_name: [obj.pk for obj in objs] }) class ReverseGenericRelatedObjectsDescriptor(object): """ This class provides the functionality that makes the related-object managers available as attributes on a model class, for fields that have multiple "remote" values and have a GenericRelation defined in their model (rather than having another model pointed *at* them). In the example "article.publications", the publications attribute is a ReverseGenericRelatedObjectsDescriptor instance. 
""" def __init__(self, field): self.field = field def __get__(self, instance, instance_type=None): if instance is None: return self # Dynamically create a class that subclasses the related model's # default manager. rel_model = self.field.rel.to superclass = rel_model._default_manager.__class__ RelatedManager = create_generic_related_manager(superclass) qn = connection.ops.quote_name content_type = ContentType.objects.db_manager(instance._state.db).get_for_model(instance) join_cols = self.field.get_joining_columns(reverse_join=True)[0] manager = RelatedManager( model = rel_model, instance = instance, source_col_name = qn(join_cols[0]), target_col_name = qn(join_cols[1]), content_type = content_type, content_type_field_name = self.field.content_type_field_name, object_id_field_name = self.field.object_id_field_name, prefetch_cache_name = self.field.attname, ) return manager def __set__(self, instance, value): if instance is None: raise AttributeError("Manager must be accessed via instance") manager = self.__get__(instance) manager.clear() for obj in value: manager.add(obj) def create_generic_related_manager(superclass): """ Factory function for a manager that subclasses 'superclass' (which is a Manager) and adds behavior for generic related objects. 
""" class GenericRelatedObjectManager(superclass): def __init__(self, model=None, instance=None, symmetrical=None, source_col_name=None, target_col_name=None, content_type=None, content_type_field_name=None, object_id_field_name=None, prefetch_cache_name=None): super(GenericRelatedObjectManager, self).__init__() self.model = model self.content_type = content_type self.symmetrical = symmetrical self.instance = instance self.source_col_name = source_col_name self.target_col_name = target_col_name self.content_type_field_name = content_type_field_name self.object_id_field_name = object_id_field_name self.prefetch_cache_name = prefetch_cache_name self.pk_val = self.instance._get_pk_val() self.core_filters = { '%s__pk' % content_type_field_name: content_type.id, '%s__exact' % object_id_field_name: instance._get_pk_val(), } def get_queryset(self): try: return self.instance._prefetched_objects_cache[self.prefetch_cache_name] except (AttributeError, KeyError): db = self._db or router.db_for_read(self.model, instance=self.instance) return super(GenericRelatedObjectManager, self).get_queryset().using(db).filter(**self.core_filters) def get_prefetch_queryset(self, instances): db = self._db or router.db_for_read(self.model, instance=instances[0]) query = { '%s__pk' % self.content_type_field_name: self.content_type.id, '%s__in' % self.object_id_field_name: set(obj._get_pk_val() for obj in instances) } qs = super(GenericRelatedObjectManager, self).get_queryset().using(db).filter(**query) # We (possibly) need to convert object IDs to the type of the # instances' PK in order to match up instances: object_id_converter = instances[0]._meta.pk.to_python return (qs, lambda relobj: object_id_converter(getattr(relobj, self.object_id_field_name)), lambda obj: obj._get_pk_val(), False, self.prefetch_cache_name) def add(self, *objs): for obj in objs: if not isinstance(obj, self.model): raise TypeError("'%s' instance expected" % self.model._meta.object_name) setattr(obj, 
self.content_type_field_name, self.content_type) setattr(obj, self.object_id_field_name, self.pk_val) obj.save() add.alters_data = True def remove(self, *objs): db = router.db_for_write(self.model, instance=self.instance) for obj in objs: obj.delete(using=db) remove.alters_data = True def clear(self): db = router.db_for_write(self.model, instance=self.instance) for obj in self.all(): obj.delete(using=db) clear.alters_data = True def create(self, **kwargs): kwargs[self.content_type_field_name] = self.content_type kwargs[self.object_id_field_name] = self.pk_val db = router.db_for_write(self.model, instance=self.instance) return super(GenericRelatedObjectManager, self).using(db).create(**kwargs) create.alters_data = True return GenericRelatedObjectManager class GenericRel(ForeignObjectRel): def __init__(self, field, to, related_name=None, limit_choices_to=None): super(GenericRel, self).__init__(field, to, related_name, limit_choices_to) class BaseGenericInlineFormSet(BaseModelFormSet): """ A formset for generic inline objects to a parent. 
""" def __init__(self, data=None, files=None, instance=None, save_as_new=None, prefix=None, queryset=None): opts = self.model._meta self.instance = instance self.rel_name = '-'.join(( opts.app_label, opts.model_name, self.ct_field.name, self.ct_fk_field.name, )) if self.instance is None or self.instance.pk is None: qs = self.model._default_manager.none() else: if queryset is None: queryset = self.model._default_manager qs = queryset.filter(**{ self.ct_field.name: ContentType.objects.get_for_model(self.instance), self.ct_fk_field.name: self.instance.pk, }) super(BaseGenericInlineFormSet, self).__init__( queryset=qs, data=data, files=files, prefix=prefix ) @classmethod def get_default_prefix(cls): opts = cls.model._meta return '-'.join((opts.app_label, opts.model_name, cls.ct_field.name, cls.ct_fk_field.name, )) def save_new(self, form, commit=True): kwargs = { self.ct_field.get_attname(): ContentType.objects.get_for_model(self.instance).pk, self.ct_fk_field.get_attname(): self.instance.pk, } new_obj = self.model(**kwargs) return save_instance(form, new_obj, commit=commit) def generic_inlineformset_factory(model, form=ModelForm, formset=BaseGenericInlineFormSet, ct_field="content_type", fk_field="object_id", fields=None, exclude=None, extra=3, can_order=False, can_delete=True, max_num=None, formfield_callback=None, validate_max=False): """ Returns a ``GenericInlineFormSet`` for the given kwargs. You must provide ``ct_field`` and ``object_id`` if they different from the defaults ``content_type`` and ``object_id`` respectively. 
""" opts = model._meta # if there is no field called `ct_field` let the exception propagate ct_field = opts.get_field(ct_field) if not isinstance(ct_field, models.ForeignKey) or ct_field.rel.to != ContentType: raise Exception("fk_name '%s' is not a ForeignKey to ContentType" % ct_field) fk_field = opts.get_field(fk_field) # let the exception propagate if exclude is not None: exclude = list(exclude) exclude.extend([ct_field.name, fk_field.name]) else: exclude = [ct_field.name, fk_field.name] FormSet = modelformset_factory(model, form=form, formfield_callback=formfield_callback, formset=formset, extra=extra, can_delete=can_delete, can_order=can_order, fields=fields, exclude=exclude, max_num=max_num, validate_max=validate_max) FormSet.ct_field = ct_field FormSet.ct_fk_field = fk_field return FormSet class GenericInlineModelAdmin(InlineModelAdmin): ct_field = "content_type" ct_fk_field = "object_id" formset = BaseGenericInlineFormSet def get_formset(self, request, obj=None, **kwargs): if self.declared_fieldsets: fields = flatten_fieldsets(self.declared_fieldsets) else: fields = None if self.exclude is None: exclude = [] else: exclude = list(self.exclude) exclude.extend(self.get_readonly_fields(request, obj)) if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude: # Take the custom ModelForm's Meta.exclude into account only if the # GenericInlineModelAdmin doesn't define its own. 
exclude.extend(self.form._meta.exclude) exclude = exclude or None can_delete = self.can_delete and self.has_delete_permission(request, obj) defaults = { "ct_field": self.ct_field, "fk_field": self.ct_fk_field, "form": self.form, "formfield_callback": partial(self.formfield_for_dbfield, request=request), "formset": self.formset, "extra": self.extra, "can_delete": can_delete, "can_order": False, "fields": fields, "max_num": self.max_num, "exclude": exclude } defaults.update(kwargs) return generic_inlineformset_factory(self.model, **defaults) class GenericStackedInline(GenericInlineModelAdmin): template = 'admin/edit_inline/stacked.html' class GenericTabularInline(GenericInlineModelAdmin): template = 'admin/edit_inline/tabular.html'
sourabhjains/Selfconfiguring-red
refs/heads/master
src/internet/bindings/callbacks_list.py
19
callback_classes = [ ['void', 'ns3::Ipv6Address', 'unsigned char', 'unsigned char', 'unsigned char', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ['void', 'ns3::Ipv4Address', 'unsigned char', 'unsigned char', 'unsigned char', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ['void', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ['void', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ['bool', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ['void', 'ns3::Ptr<ns3::Packet>', 'ns3::Ipv6Address', 'ns3::Ipv6Address', 'unsigned char', 'ns3::Ptr<ns3::Ipv6Route>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ['void', 'ns3::Ptr<ns3::Packet>', 'ns3::Ipv4Address', 'ns3::Ipv4Address', 'unsigned char', 'ns3::Ptr<ns3::Ipv4Route>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 
'ns3::empty'], ['void', 'ns3::Ptr<ns3::ArpCache const>', 'ns3::Ipv4Address', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ['unsigned char', 'ns3::Ptr<ns3::QueueItem>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ]
meilalina/kotlin-web-site
refs/heads/master
src/pages/MyFlatPages.py
6
import re from flask_flatpages.flatpages import FlatPages from werkzeug.utils import import_string from src.pages.MyPage import MyPage class MyFlatPages(FlatPages): def _parse(self, content, path): """Parse a flatpage file, i.e. read and parse its meta data and body. :return: initialized :class:`Page` instance. """ content = content.replace("\r\n", "\n") try: _, meta, content = re.compile(r'^-{3,}$', re.MULTILINE).split(content, 2) except: raise Exception("Can't find two --- markers in " + path) # Now we ready to get HTML renderer function html_renderer = self.config('html_renderer') # If function is not callable yet, import it if not callable(html_renderer): html_renderer = import_string(html_renderer) # Make able to pass custom arguments to renderer function html_renderer = self._smart_html_renderer(html_renderer) # Initialize and return Page instance return MyPage(path, meta, content, html_renderer)
ept/windmill
refs/heads/master
windmill/management/commands/test_windmill.py
1
# Copyright (c) 2008-2009 Mikeal Rogers <mikeal.rogers@gmail.com> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from django.core.management.base import BaseCommand from windmill.authoring import djangotest import sys, os from time import sleep import types import logging class ServerContainer(object): start_test_server = djangotest.start_test_server stop_test_server = djangotest.stop_test_server def attempt_import(name, suffix): try: mod = __import__(name+'.'+suffix) except ImportError: mod = None if mod is not None: s = name.split('.') mod = __import__(s.pop(0)) for x in s+[suffix]: mod = getattr(mod, x) return mod class Command(BaseCommand): help = "Run windmill tests. 
Specify a browser, if one is not passed Firefox will be used" args = '<label label ...>' label = 'label' def handle(self, *labels, **options): from windmill.conf import global_settings from windmill.authoring.djangotest import WindmillDjangoUnitTest if 'ie' in labels: global_settings.START_IE = True sys.argv.remove('ie') elif 'safari' in labels: global_settings.START_SAFARI = True sys.argv.remove('safari') elif 'chrome' in labels: global_settings.START_CHROME = True sys.argv.remove('chrome') else: global_settings.START_FIREFOX = True if 'firefox' in labels: sys.argv.remove('firefox') if 'manage.py' in sys.argv: sys.argv.remove('manage.py') if 'test_windmill' in sys.argv: sys.argv.remove('test_windmill') server_container = ServerContainer() server_container.start_test_server() global_settings.TEST_URL = 'http://localhost:%d' % server_container.server_thread.port # import windmill # windmill.stdout, windmill.stdin = sys.stdout, sys.stdin from windmill.authoring import setup_module, teardown_module from django.conf import settings tests = [] for name in settings.INSTALLED_APPS: for suffix in ['tests', 'wmtests', 'windmilltests']: x = attempt_import(name, suffix) if x is not None: tests.append((suffix,x,)); wmtests = [] for (ttype, mod,) in tests: if ttype == 'tests': for ucls in [getattr(mod, x) for x in dir(mod) if ( type(getattr(mod, x, None)) in (types.ClassType, types.TypeType) ) and issubclass(getattr(mod, x), WindmillDjangoUnitTest) ]: wmtests.append(ucls.test_dir) else: if mod.__file__.endswith('__init__.py') or mod.__file__.endswith('__init__.pyc'): wmtests.append(os.path.join(*os.path.split(os.path.abspath(mod.__file__))[:-1])) else: wmtests.append(os.path.abspath(mod.__file__)) if len(wmtests) is 0: print 'Sorry, no windmill tests found.' 
else: testtotals = {} x = logging.getLogger() x.setLevel(0) from windmill.server.proxy import logger from functest import bin from functest import runner runner.CLIRunner.final = classmethod(lambda self, totals: testtotals.update(totals) ) import windmill setup_module(tests[0][1]) sys.argv = sys.argv + wmtests bin.cli() teardown_module(tests[0][1]) if testtotals['fail'] is not 0: sleep(.5) sys.exit(1)
Gabotero/GNURadioNext
refs/heads/master
gr-trellis/examples/python/test_sccc_hard.py
13
#!/usr/bin/env python from gnuradio import gr from gnuradio import trellis, digital, blocks from gnuradio import eng_notation import math import sys import random import fsm_utils try: from gnuradio import analog except ImportError: sys.stderr.write("Error: Program requires gr-analog.\n") sys.exit(1) def run_test (fo,fi,interleaver,Kb,bitspersymbol,K,dimensionality,constellation,N0,seed): tb = gr.top_block () # TX src = blocks.lfsr_32k_source_s() src_head = blocks.head(gr.sizeof_short,Kb/16) # packet size in shorts s2fsmi = blocks.packed_to_unpacked_ss(bitspersymbol,gr.GR_MSB_FIRST) # unpack shorts to symbols compatible with the outer FSM input cardinality enc_out = trellis.encoder_ss(fo,0) # initial state = 0 inter = trellis.permutation(interleaver.K(),interleaver.INTER(),1,gr.sizeof_short) enc_in = trellis.encoder_ss(fi,0) # initial state = 0 mod = digital.chunks_to_symbols_sf(constellation,dimensionality) # CHANNEL add = blocks.add_ff() noise = analog.noise_source_f(analog.GR_GAUSSIAN,math.sqrt(N0/2),seed) # RX metrics_in = trellis.metrics_f(fi.O(),dimensionality,constellation,digital.TRELLIS_EUCLIDEAN) # data preprocessing to generate metrics for innner Viterbi va_in = trellis.viterbi_s(fi,K,0,-1) # Put -1 if the Initial/Final states are not set. deinter = trellis.permutation(interleaver.K(),interleaver.DEINTER(),1,gr.sizeof_short) metrics_out = trellis.metrics_s(fo.O(),1,[0,1,2,3],digital.TRELLIS_HARD_SYMBOL) # data preprocessing to generate metrics for outer Viterbi (hard decisions) va_out = trellis.viterbi_s(fo,K,0,-1) # Put -1 if the Initial/Final states are not set. 
fsmi2s = blocks.unpacked_to_packed_ss(bitspersymbol,gr.GR_MSB_FIRST) # pack FSM input symbols to shorts dst = blocks.check_lfsr_32k_s() tb.connect (src,src_head,s2fsmi,enc_out,inter,enc_in,mod) tb.connect (mod,(add,0)) tb.connect (noise,(add,1)) tb.connect (add,metrics_in) tb.connect (metrics_in,va_in,deinter,metrics_out,va_out,fsmi2s,dst) tb.run() ntotal = dst.ntotal () nright = dst.nright () runlength = dst.runlength () return (ntotal,ntotal-nright) def main(args): nargs = len (args) if nargs == 4: fname_out=args[0] fname_in=args[1] esn0_db=float(args[2]) # Es/No in dB rep=int(args[3]) # number of times the experiment is run to collect enough errors else: sys.stderr.write ('usage: test_tcm.py fsm_name_out fsm_fname_in Es/No_db repetitions\n') sys.exit (1) # system parameters Kb=1024*16 # packet size in bits (make it multiple of 16 so it can be packed in a short) fo=trellis.fsm(fname_out) # get the outer FSM specification from a file fi=trellis.fsm(fname_in) # get the innner FSM specification from a file bitspersymbol = int(round(math.log(fo.I())/math.log(2))) # bits per FSM input symbol if fo.O() != fi.I(): sys.stderr.write ('Incompatible cardinality between outer and inner FSM.\n') sys.exit (1) K=Kb/bitspersymbol # packet size in trellis steps interleaver=trellis.interleaver(K,666) # construct a random interleaver modulation = fsm_utils.psk8 # see fsm_utlis.py for available predefined modulations dimensionality = modulation[0] constellation = modulation[1] if len(constellation)/dimensionality != fi.O(): sys.stderr.write ('Incompatible FSM output cardinality and modulation size.\n') sys.exit (1) # calculate average symbol energy Es = 0 for i in range(len(constellation)): Es = Es + constellation[i]**2 Es = Es / (len(constellation)/dimensionality) N0=Es/pow(10.0,esn0_db/10.0); # calculate noise variance tot_s=0 # total number of transmitted shorts terr_s=0 # total number of shorts in error terr_p=0 # total number of packets in error for i in range(rep): 
(s,e)=run_test(fo,fi,interleaver,Kb,bitspersymbol,K,dimensionality,constellation,N0,-long(666+i)) # run experiment with different seed to get different noise realizations tot_s=tot_s+s terr_s=terr_s+e terr_p=terr_p+(terr_s!=0) if ((i+1)%100==0) : # display progress print i+1,terr_p, '%.2e' % ((1.0*terr_p)/(i+1)),tot_s,terr_s, '%.2e' % ((1.0*terr_s)/tot_s) # estimate of the (short or bit) error rate print rep,terr_p, '%.2e' % ((1.0*terr_p)/(i+1)),tot_s,terr_s, '%.2e' % ((1.0*terr_s)/tot_s) if __name__ == '__main__': main (sys.argv[1:])
bussiere/gitfs
refs/heads/master
tests/cache/__init__.py
36
# Copyright 2014 PressLabs SRL # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
MER-GROUP/intellij-community
refs/heads/master
python/lib/Lib/site-packages/django/contrib/databrowse/sites.py
329
from django import http from django.db import models from django.contrib.databrowse.datastructures import EasyModel from django.shortcuts import render_to_response from django.utils.safestring import mark_safe class AlreadyRegistered(Exception): pass class NotRegistered(Exception): pass class DatabrowsePlugin(object): def urls(self, plugin_name, easy_instance_field): """ Given an EasyInstanceField object, returns a list of URLs for this plugin's views of this object. These URLs should be absolute. Returns None if the EasyInstanceField object doesn't get a list of plugin-specific URLs. """ return None def model_index_html(self, request, model, site): """ Returns a snippet of HTML to include on the model index page. """ return '' def model_view(self, request, model_databrowse, url): """ Handles main URL routing for a plugin's model-specific pages. """ raise NotImplementedError class ModelDatabrowse(object): plugins = {} def __init__(self, model, site): self.model = model self.site = site def root(self, request, url): """ Handles main URL routing for the databrowse app. `url` is the remainder of the URL -- e.g. 'objects/3'. """ # Delegate to the appropriate method, based on the URL. 
if url is None: return self.main_view(request) try: plugin_name, rest_of_url = url.split('/', 1) except ValueError: # need more than 1 value to unpack plugin_name, rest_of_url = url, None try: plugin = self.plugins[plugin_name] except KeyError: raise http.Http404('A plugin with the requested name does not exist.') return plugin.model_view(request, self, rest_of_url) def main_view(self, request): easy_model = EasyModel(self.site, self.model) html_snippets = mark_safe(u'\n'.join([p.model_index_html(request, self.model, self.site) for p in self.plugins.values()])) return render_to_response('databrowse/model_detail.html', { 'model': easy_model, 'root_url': self.site.root_url, 'plugin_html': html_snippets, }) class DatabrowseSite(object): def __init__(self): self.registry = {} # model_class -> databrowse_class self.root_url = None def register(self, model_or_iterable, databrowse_class=None, **options): """ Registers the given model(s) with the given databrowse site. The model(s) should be Model classes, not instances. If a databrowse class isn't given, it will use DefaultModelDatabrowse (the default databrowse options). If a model is already registered, this will raise AlreadyRegistered. """ databrowse_class = databrowse_class or DefaultModelDatabrowse if issubclass(model_or_iterable, models.Model): model_or_iterable = [model_or_iterable] for model in model_or_iterable: if model in self.registry: raise AlreadyRegistered('The model %s is already registered' % model.__name__) self.registry[model] = databrowse_class def unregister(self, model_or_iterable): """ Unregisters the given model(s). If a model isn't already registered, this will raise NotRegistered. 
""" if issubclass(model_or_iterable, models.Model): model_or_iterable = [model_or_iterable] for model in model_or_iterable: if model not in self.registry: raise NotRegistered('The model %s is not registered' % model.__name__) del self.registry[model] def root(self, request, url): """ Handles main URL routing for the databrowse app. `url` is the remainder of the URL -- e.g. 'comments/comment/'. """ self.root_url = request.path[:len(request.path) - len(url)] url = url.rstrip('/') # Trim trailing slash, if it exists. if url == '': return self.index(request) elif '/' in url: return self.model_page(request, *url.split('/', 2)) raise http.Http404('The requested databrowse page does not exist.') def index(self, request): m_list = [EasyModel(self, m) for m in self.registry.keys()] return render_to_response('databrowse/homepage.html', {'model_list': m_list, 'root_url': self.root_url}) def model_page(self, request, app_label, model_name, rest_of_url=None): """ Handles the model-specific functionality of the databrowse site, delegating to the appropriate ModelDatabrowse class. """ model = models.get_model(app_label, model_name) if model is None: raise http.Http404("App %r, model %r, not found." % (app_label, model_name)) try: databrowse_class = self.registry[model] except KeyError: raise http.Http404("This model exists but has not been registered with databrowse.") return databrowse_class(model, self).root(request, rest_of_url) site = DatabrowseSite() from django.contrib.databrowse.plugins.calendars import CalendarPlugin from django.contrib.databrowse.plugins.objects import ObjectDetailPlugin from django.contrib.databrowse.plugins.fieldchoices import FieldChoicePlugin class DefaultModelDatabrowse(ModelDatabrowse): plugins = {'objects': ObjectDetailPlugin(), 'calendars': CalendarPlugin(), 'fields': FieldChoicePlugin()}
hnikolov/pihdf_doc
refs/heads/master
_python/filters.py
20
def dateformat(value, format="%d-%b-%Y"): return value.strftime(format) filters = {} filters['dateformat'] = dateformat
GHackAnonymous/qemu
refs/heads/master
scripts/qmp/qmp.py
21
# QEMU Monitor Protocol Python class # # Copyright (C) 2009, 2010 Red Hat Inc. # # Authors: # Luiz Capitulino <lcapitulino@redhat.com> # # This work is licensed under the terms of the GNU GPL, version 2. See # the COPYING file in the top-level directory. import json import errno import socket class QMPError(Exception): pass class QMPConnectError(QMPError): pass class QMPCapabilitiesError(QMPError): pass class QMPTimeoutError(QMPError): pass class QEMUMonitorProtocol: def __init__(self, address, server=False): """ Create a QEMUMonitorProtocol class. @param address: QEMU address, can be either a unix socket path (string) or a tuple in the form ( address, port ) for a TCP connection @param server: server mode listens on the socket (bool) @raise socket.error on socket connection errors @note No connection is established, this is done by the connect() or accept() methods """ self.__events = [] self.__address = address self.__sock = self.__get_sock() if server: self.__sock.bind(self.__address) self.__sock.listen(1) def __get_sock(self): if isinstance(self.__address, tuple): family = socket.AF_INET else: family = socket.AF_UNIX return socket.socket(family, socket.SOCK_STREAM) def __negotiate_capabilities(self): greeting = self.__json_read() if greeting is None or not greeting.has_key('QMP'): raise QMPConnectError # Greeting seems ok, negotiate capabilities resp = self.cmd('qmp_capabilities') if "return" in resp: return greeting raise QMPCapabilitiesError def __json_read(self, only_event=False): while True: data = self.__sockfile.readline() if not data: return resp = json.loads(data) if 'event' in resp: self.__events.append(resp) if not only_event: continue return resp error = socket.error def __get_events(self, wait=False): """ Check for new events in the stream and cache them in __events. @param wait (bool): block until an event is available. @param wait (float): If wait is a float, treat it as a timeout value. 
@raise QMPTimeoutError: If a timeout float is provided and the timeout period elapses. @raise QMPConnectError: If wait is True but no events could be retrieved or if some other error occurred. """ # Check for new events regardless and pull them into the cache: self.__sock.setblocking(0) try: self.__json_read() except socket.error, err: if err[0] == errno.EAGAIN: # No data available pass self.__sock.setblocking(1) # Wait for new events, if needed. # if wait is 0.0, this means "no wait" and is also implicitly false. if not self.__events and wait: if isinstance(wait, float): self.__sock.settimeout(wait) try: ret = self.__json_read(only_event=True) except socket.timeout: raise QMPTimeoutError("Timeout waiting for event") except: raise QMPConnectError("Error while reading from socket") if ret is None: raise QMPConnectError("Error while reading from socket") self.__sock.settimeout(None) def connect(self, negotiate=True): """ Connect to the QMP Monitor and perform capabilities negotiation. @return QMP greeting dict @raise socket.error on socket connection errors @raise QMPConnectError if the greeting is not received @raise QMPCapabilitiesError if fails to negotiate capabilities """ self.__sock.connect(self.__address) self.__sockfile = self.__sock.makefile() if negotiate: return self.__negotiate_capabilities() def accept(self): """ Await connection from QMP Monitor and perform capabilities negotiation. @return QMP greeting dict @raise socket.error on socket connection errors @raise QMPConnectError if the greeting is not received @raise QMPCapabilitiesError if fails to negotiate capabilities """ self.__sock, _ = self.__sock.accept() self.__sockfile = self.__sock.makefile() return self.__negotiate_capabilities() def cmd_obj(self, qmp_cmd): """ Send a QMP command to the QMP Monitor. 
@param qmp_cmd: QMP command to be sent as a Python dict @return QMP response as a Python dict or None if the connection has been closed """ try: self.__sock.sendall(json.dumps(qmp_cmd)) except socket.error, err: if err[0] == errno.EPIPE: return raise socket.error(err) return self.__json_read() def cmd(self, name, args=None, id=None): """ Build a QMP command and send it to the QMP Monitor. @param name: command name (string) @param args: command arguments (dict) @param id: command id (dict, list, string or int) """ qmp_cmd = { 'execute': name } if args: qmp_cmd['arguments'] = args if id: qmp_cmd['id'] = id return self.cmd_obj(qmp_cmd) def command(self, cmd, **kwds): ret = self.cmd(cmd, kwds) if ret.has_key('error'): raise Exception(ret['error']['desc']) return ret['return'] def pull_event(self, wait=False): """ Get and delete the first available QMP event. @param wait (bool): block until an event is available. @param wait (float): If wait is a float, treat it as a timeout value. @raise QMPTimeoutError: If a timeout float is provided and the timeout period elapses. @raise QMPConnectError: If wait is True but no events could be retrieved or if some other error occurred. @return The first available QMP event, or None. """ self.__get_events(wait) if self.__events: return self.__events.pop(0) return None def get_events(self, wait=False): """ Get a list of available QMP events. @param wait (bool): block until an event is available. @param wait (float): If wait is a float, treat it as a timeout value. @raise QMPTimeoutError: If a timeout float is provided and the timeout period elapses. @raise QMPConnectError: If wait is True but no events could be retrieved or if some other error occurred. @return The list of available QMP events. """ self.__get_events(wait) return self.__events def clear_events(self): """ Clear current list of pending events. 
""" self.__events = [] def close(self): self.__sock.close() self.__sockfile.close() timeout = socket.timeout def settimeout(self, timeout): self.__sock.settimeout(timeout) def get_sock_fd(self): return self.__sock.fileno() def is_scm_available(self): return self.__sock.family == socket.AF_UNIX
benpatterson/edx-platform
refs/heads/master
common/test/acceptance/pages/lms/tab_nav.py
112
""" High-level tab navigation. """ from bok_choy.page_object import PageObject from bok_choy.promise import Promise, EmptyPromise class TabNavPage(PageObject): """ High-level tab navigation. """ url = None def is_browser_on_page(self): return self.q(css='ol.course-tabs').present def go_to_tab(self, tab_name): """ Navigate to the tab `tab_name`. """ if tab_name not in ['Courseware', 'Course Info', 'Discussion', 'Wiki', 'Progress']: self.warning("'{0}' is not a valid tab name".format(tab_name)) # The only identifier for individual tabs is the link href # so we find the tab with `tab_name` in its text. tab_css = self._tab_css(tab_name) if tab_css is not None: self.q(css=tab_css).first.click() else: self.warning("No tabs found for '{0}'".format(tab_name)) self.wait_for_page() self._is_on_tab_promise(tab_name).fulfill() def is_on_tab(self, tab_name): """ Return a boolean indicating whether the current tab is `tab_name`. Because this is a public method, it checks that we're on the right page before accessing the DOM. """ return self._is_on_tab(tab_name) def _tab_css(self, tab_name): """ Return the CSS to click for `tab_name`. If no tabs exist for that name, return `None`. """ all_tabs = self.tab_names try: tab_index = all_tabs.index(tab_name) except ValueError: return None else: return 'ol.course-tabs li:nth-of-type({0}) a'.format(tab_index + 1) @property def tab_names(self): """ Return the list of available tab names. If no tab names are available, wait for them to load. Raises a `BrokenPromiseError` if the tab names fail to load. """ def _check_func(): tab_names = self.q(css='ol.course-tabs li a').text return (len(tab_names) > 0, tab_names) return Promise(_check_func, "Get all tab names").fulfill() def _is_on_tab(self, tab_name): """ Return a boolean indicating whether the current tab is `tab_name`. This is a private method, so it does NOT enforce the page check, which is what we want when we're polling the DOM in a promise. 
""" current_tab_list = self.q(css='ol.course-tabs > li > a.active').text if len(current_tab_list) == 0: self.warning("Could not find current tab") return False else: return current_tab_list[0].strip().split('\n')[0] == tab_name def _is_on_tab_promise(self, tab_name): """ Return a `Promise` that the user is on the tab `tab_name`. """ # Use the private version of _is_on_tab to skip the page check return EmptyPromise( lambda: self._is_on_tab(tab_name), "{0} is the current tab".format(tab_name) )
Bashar/django
refs/heads/master
django/db/backends/mysql/client.py
84
import os import sys from django.db.backends import BaseDatabaseClient class DatabaseClient(BaseDatabaseClient): executable_name = 'mysql' def runshell(self): settings_dict = self.connection.settings_dict args = [self.executable_name] db = settings_dict['OPTIONS'].get('db', settings_dict['NAME']) user = settings_dict['OPTIONS'].get('user', settings_dict['USER']) passwd = settings_dict['OPTIONS'].get('passwd', settings_dict['PASSWORD']) host = settings_dict['OPTIONS'].get('host', settings_dict['HOST']) port = settings_dict['OPTIONS'].get('port', settings_dict['PORT']) defaults_file = settings_dict['OPTIONS'].get('read_default_file') # Seems to be no good way to set sql_mode with CLI. if defaults_file: args += ["--defaults-file=%s" % defaults_file] if user: args += ["--user=%s" % user] if passwd: args += ["--password=%s" % passwd] if host: if '/' in host: args += ["--socket=%s" % host] else: args += ["--host=%s" % host] if port: args += ["--port=%s" % port] if db: args += [db] if os.name == 'nt': sys.exit(os.system(" ".join(args))) else: os.execvp(self.executable_name, args)
wayneicn/crazyflie-clients-python
refs/heads/master
lib/cflib/utils/__init__.py
40
#!/usr/bin/env python # -*- coding: utf-8 -*- # # || ____ _ __ # +------+ / __ )(_) /_______________ _____ ___ # | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \ # +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/ # || || /_____/_/\__/\___/_/ \__,_/ /___/\___/ # # Copyright (C) 2011-2013 Bitcraze AB # # Crazyflie Nano Quadcopter Client # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. """ Various utilities that is needed by the cflib. """
HarmonyEnterpriseSolutions/harmony-platform
refs/heads/master
src/gnue/common/formatting/masks/MaskTokenizer.py
2
# # This file is part of GNU Enterprise. # # GNU Enterprise is free software; you can redistribute it # and/or modify it under the terms of the GNU General Public # License as published by the Free Software Foundation; either # version 2, or (at your option) any later version. # # GNU Enterprise is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied # warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR # PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public # License along with program; see the file COPYING. If not, # write to the Free Software Foundation, Inc., 59 Temple Place # - Suite 330, Boston, MA 02111-1307, USA. # # Copyright 2001-2007 Free Software Foundation # # pylint: disable-msg=W0402, # W0402 is disabled as the only obsolete module (as of 4/12/06) # is the string module which is used for string.digits # # FILE: # MaskParser # # DESCRIPTION: """ A parser which takes a text based mask and generates a list of token class instances that represent that mask. Any repeaters found in the mask ({3}) are replaced with the appropriate number of tokens so that the mask class will not have to deal with them. Valid tokens include: """ __revision__ = "$Id$" import string import StringIO from gnue.common.external.plex import \ Scanner, Lexicon, Errors, \ Str, Begin, State, AnyChar, Rep1, Any from gnue.common.formatting.masks.Errors import MaskDefinitionError from gnue.common.formatting.masks import MaskTokens class MaskTokenizer(Scanner): """ Custom plex scanner used to contstruct a token list which represents an input mask. This token list is used by the input mask to define valid input for each position of the input. Takes a file handle containing an input mask and creates a list of Tokens which define the input mask """ def get_type(self): """ Returns the apparent type of this mask. 
@rtype: string @return: The value 'text', 'numeric', or 'date' """ return self.type def get_tokens(self): """ Returns a list of the tokens after parsing the input mask. @rtype: list @return: The list of tokens """ return self.tokens[:] # ========================================================================= # Private stuff # ========================================================================= # ------------------------------------------------------------------------- # Lexicon action functions # ------------------------------------------------------------------------- def _check_single(self, text): """ Function to add single instance tokens to the input mask. A single instance token is something that can appear only once in an input mask. """ if text in self.__singles: raise Errors.UnrecognizedInput(self, \ 'Mask can only have one "%s" token' %text) self.__singles.append(text) if text == '!': self.produce (MaskTokens.RightToLeft(text)) elif text in '.+,': self.produce(MaskTokens.NumberToken(text)) else: self.produce(MaskTokens.TextToken(text)) def _literal(self, text): """ A text literal that should appear as is in the mask """ self.produce(MaskTokens.Literal(text)) def _literal_2nd(self, text): """ Closes the literal string """ return self._literal(text[1:]) def _escape(self, text): """ An escaped character such as \$ to display a $ """ self.begin('') self.produce(MaskTokens.Literal(text)) def _repeater(self, text): """ Action to process an input mask repeater. A repeater tells the parser to repeat the previous token a specified number of times. @param text: The value pulled from between the {} which denotes the number of times to repeat. """ self.produce(MaskTokens.Repeater(int(text))) def _begin_set(self, text): """ Action to process the start of a set of valid characters. The scanner will be placed into set state and the list of valid characters will be reset. 
""" self.begin('set') self._set = "" def _add_set(self, text): """ Action to add a character to the set currently being constructed. Only called when the scanner is in state "set". The character read will be added to the character sting containing the possible valid values. """ self._set += text def _add_set_2nd(self, text): """ Action to add a special character to a set being built. Used when an escaped set character \[ or \] is found in the list of valid characters to be added to the set """ return self._add_set(text[1:]) def _end_set(self, text): """ Action to process the end of a set. Only called when the scanner is in state "set". The list of possible characters that were defined in the set will be used to build an instance of a TokenSet class. As part of this function the scanner will set to default state. """ self.begin('') self.produce(MaskTokens.TokenSet(self._set)) # ========================================================================= # Lexicon defintions # ========================================================================= # # ------------------------------------------------------------------------- # Base Lexicon definition # ------------------------------------------------------------------------- # This lexicon is the base used by all masks # _lexicon = [ # --------------------------------------------------------------------- # Default state definitions # --------------------------------------------------------------------- (Str('\\'), Begin('escape')), # found \, set state to escape # (Str("'"), Begin('quoted')), # found ', set state to quoted # (Str('"'), Begin('quoted2')), # found ", set state to qoute2 # (Str('{'), Begin('repeater')), # found {, set state to # repeater # (Str('['), _begin_set), # found [, execute _begin_set # the function will set state # to set when executed # (Str(' '), MaskTokens.Literal),# found a space, return a # literal char instance # (Any('+.,'), _check_single), # these characters can appear # only once in an 
input mask # (Any('_?AaLlCc'), MaskTokens.TextToken), # found a text character # return a text token # instance # (Any('MDYyHISPp:/'), MaskTokens.DateToken), # found a date character # return a date token # instance # (Any('#0'), MaskTokens.NumberToken), # found a number character # return a number token # instance # (Any('<>'), MaskTokens.CaseModifier), # found a case modifier # return case modifier # instance # --------------------------------------------------------------------- # Escape State # --------------------------------------------------------------------- # The escape state is entered whenever a backslash is encountered while # in the default state. It's purpose is to allow the placement of what # would normally be reserved characters into the input mask # State('escape', [ (AnyChar, _escape), # No matter which character is next # execute _escape, the function will # create a literal instance and set # the state back to default ]), # --------------------------------------------------------------------- # Quoted state # --------------------------------------------------------------------- # The quoted state is entered whenever a single quote is encountered # thile in the default state. It's purpose is to allow quoted strings # inside the input mask to sent through as their literal value # State('quoted', [ (Str("\\")+Str("'"), _literal_2nd), # Handle \' in the string (Str("'"), Begin('')), # found ', set state to default (AnyChar, _literal) # Process as literal character ]), # --------------------------------------------------------------------- # quote2 state # --------------------------------------------------------------------- # This works the exact same way as the quoted state but is used # when a double quote is encountered. 
' and " get seperate states # so that one type can always enclose the other # # Example : "Today's date: " # State('quoted2', [ (Str("\\")+Str('"'), _literal_2nd), # Handle \" in the string (Str('"'), Begin('')), # found ", set state to default (AnyChar, _literal) # Process as literal character ]), # --------------------------------------------------------------------- # repeater state # --------------------------------------------------------------------- # The repeater state is entered whenever a { is encountered # while in the default state. This state allows an input # mask to include a number inside of {} to cause the previous # token to repeat # # Example : A{5} is the same as AAAAA # State('repeater', [ (Str('}'), Begin('')),# found }, set state to # default (Rep1(Any(string.digits)), _repeater) # grab all digits inside # the {} execute _repeater, # the function will recreate # a repeater instance # containing the obtained # number ]), # --------------------------------------------------------------------- # Set state # --------------------------------------------------------------------- # The set state is entered whenever a [ is encountered while in the # default state. This provides basic regex set support where any # character inside the [] is matched. # # Example : [ABCDEF] # State('set', [ (Str("\\")+Any('[]'), _add_set_2nd), # (Str(']'), _end_set), # (AnyChar, _add_set) # ]), ] # ------------------------------------------------------------------------- # Additional lexicon definitions for input masks # ------------------------------------------------------------------------- _extra_lexicon = [ (Any('!'), _check_single), ] def __process(self, token): """ Adds a token class instance to this instances list of tokens. As token instances are generated from the input mask they are processed and then added to the scanners working list of tokens. Special tokens such as repeater and case modifiers are processed during this state. 
""" if isinstance(token, MaskTokens.Repeater): # If the incoming token is a repeater then replace # the repeater with the appropriate number of the # previous token. for unused in range(0, token.count-1): self.__process(self.__last) elif isinstance(token, MaskTokens.CaseModifier): # If then incomming token is a case modifier # then add the modifier token to the list of # modifiers stored in the scanner self.__modify.append(token) else: # Standard tokens if self.__modify and isinstance(token, MaskTokens.TextToken): # If a case modifier is stored and the incoming # token is text then force case based upon the # modifier mod = self.__modify.pop(0) if mod.token == '<': token.force_upper = True elif mod.token == '>': token.force_lower = True self.tokens.append(token) # TODO: Should this be storing modifiers and the like? It is. self.__last = token def __init__(self, mask_text, name): """ Input mask scanner constructor. The input mask scanner will create a list of class instances that describe the input mask. @type mask_text: string @param mask_text: The text to be used as the mask @type name: string @param name: The name of the input mask(TODO: ?) """ self._set = "" self.__singles = [] self.tokens = [] self.__last = None # The last token generated from the input mask self.__modify = [] mask = StringIO.StringIO(mask_text) # --------------------------------------------------------------------- # Read the input mask and convert into instances of Token classes # --------------------------------------------------------------------- try: Scanner.__init__(self, Lexicon(self._lexicon + self._extra_lexicon), mask, name) while True: token, unused = self.read() if token is None: break # Process the returned token self.__process(token) except Errors.PlexError, msg: raise MaskDefinitionError, msg if self.__modify: print "WARNING: Modifier found at end of mask." 
# --------------------------------------------------------------------- # Build a count of the various token types created during parsing # --------------------------------------------------------------------- # num_markers = 0 # Number of numeric token instances found date_markers = 0 # Number of date token instances found text_markers = 0 # Number of text token instances found rtl_pos = -1 # Right to left token # TODO: Unknown functionality at this time for (position, token) in enumerate(self.tokens): if isinstance(token, MaskTokens.RightToLeft): rtl_pos = position if not isinstance(token, MaskTokens.Literal): if token.numeric: num_markers += 1 elif token.date: date_markers += 1 else: text_markers += 1 # Check for "!" in non-numeric mask if rtl_pos >= 0: self.tokens.pop(rtl_pos) else: rtl_pos = 0 self.rtl_pos = rtl_pos # --------------------------------------------------------------------- # Check for errors and mixed marker types # --------------------------------------------------------------------- # # TODO: I'm not sure we should block mixed input types # #if not (num_markers or date_markers or text_markers): #raise MaskDefinitionError, 'Mask has no character tokens' #if (num_markers) and (date_markers or text_markers): #raise MaskDefinitionError, \ #'Numeric mask %s has non-numeric tokens' % mask_text #if (date_markers) and (num_markers or text_markers): #raise MaskDefinitionError, 'Date/Time mask has non-date tokens' # --------------------------------------------------------------------- # Set the type of parser based upon the marker counts # --------------------------------------------------------------------- # If any two of these are non-zero, then the mask is a text mask, # not date or numeric. # if (num_markers and date_markers) or text_markers: self.type = 'text' elif num_markers: self.type = 'numeric' else: self.type = 'date'
EVERROCKET/mobile-data
refs/heads/master
lib/oauth2client/keyring_storage.py
52
# Copyright 2014 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A keyring based Storage. A Storage for Credentials that uses the keyring module. """ import threading import keyring from oauth2client.client import Credentials from oauth2client.client import Storage as BaseStorage __author__ = 'jcgregorio@google.com (Joe Gregorio)' class Storage(BaseStorage): """Store and retrieve a single credential to and from the keyring. To use this module you must have the keyring module installed. See <http://pypi.python.org/pypi/keyring/>. This is an optional module and is not installed with oauth2client by default because it does not work on all the platforms that oauth2client supports, such as Google App Engine. The keyring module <http://pypi.python.org/pypi/keyring/> is a cross-platform library for access the keyring capabilities of the local system. The user will be prompted for their keyring password when this module is used, and the manner in which the user is prompted will vary per platform. Usage:: from oauth2client.keyring_storage import Storage s = Storage('name_of_application', 'user1') credentials = s.get() """ def __init__(self, service_name, user_name): """Constructor. Args: service_name: string, The name of the service under which the credentials are stored. user_name: string, The name of the user to store credentials for. 
""" self._service_name = service_name self._user_name = user_name self._lock = threading.Lock() def acquire_lock(self): """Acquires any lock necessary to access this Storage. This lock is not reentrant. """ self._lock.acquire() def release_lock(self): """Release the Storage lock. Trying to release a lock that isn't held will result in a RuntimeError. """ self._lock.release() def locked_get(self): """Retrieve Credential from file. Returns: oauth2client.client.Credentials """ credentials = None content = keyring.get_password(self._service_name, self._user_name) if content is not None: try: credentials = Credentials.new_from_json(content) credentials.set_store(self) except ValueError: pass return credentials def locked_put(self, credentials): """Write Credentials to file. Args: credentials: Credentials, the credentials to store. """ keyring.set_password(self._service_name, self._user_name, credentials.to_json()) def locked_delete(self): """Delete Credentials file. Args: credentials: Credentials, the credentials to store. """ keyring.set_password(self._service_name, self._user_name, '')
tempbottle/kbengine
refs/heads/master
kbe/res/scripts/common/Lib/importlib/_bootstrap.py
76
"""Core implementation of import. This module is NOT meant to be directly imported! It has been designed such that it can be bootstrapped into Python as the implementation of import. As such it requires the injection of specific modules and attributes in order to work. One should use importlib as the public-facing version of this module. """ # # IMPORTANT: Whenever making changes to this module, be sure to run # a top-level make in order to get the frozen version of the module # update. Not doing so will result in the Makefile to fail for # all others who don't have a ./python around to freeze the module # in the early stages of compilation. # # See importlib._setup() for what is injected into the global namespace. # When editing this code be aware that code executed at import time CANNOT # reference any injected objects! This includes not only global code but also # anything specified at the class level. # Bootstrap-related code ###################################################### _CASE_INSENSITIVE_PLATFORMS = 'win', 'cygwin', 'darwin' def _make_relax_case(): if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS): def _relax_case(): """True if filenames must be checked case-insensitively.""" return b'PYTHONCASEOK' in _os.environ else: def _relax_case(): """True if filenames must be checked case-insensitively.""" return False return _relax_case def _w_long(x): """Convert a 32-bit integer to little-endian.""" return (int(x) & 0xFFFFFFFF).to_bytes(4, 'little') def _r_long(int_bytes): """Convert 4 bytes in little-endian to an integer.""" return int.from_bytes(int_bytes, 'little') def _path_join(*path_parts): """Replacement for os.path.join().""" return path_sep.join([part.rstrip(path_separators) for part in path_parts if part]) def _path_split(path): """Replacement for os.path.split().""" if len(path_separators) == 1: front, _, tail = path.rpartition(path_sep) return front, tail for x in reversed(path): if x in path_separators: front, tail = path.rsplit(x, 
maxsplit=1) return front, tail return '', path def _path_stat(path): """Stat the path. Made a separate function to make it easier to override in experiments (e.g. cache stat results). """ return _os.stat(path) def _path_is_mode_type(path, mode): """Test whether the path is the specified mode type.""" try: stat_info = _path_stat(path) except OSError: return False return (stat_info.st_mode & 0o170000) == mode def _path_isfile(path): """Replacement for os.path.isfile.""" return _path_is_mode_type(path, 0o100000) def _path_isdir(path): """Replacement for os.path.isdir.""" if not path: path = _os.getcwd() return _path_is_mode_type(path, 0o040000) def _write_atomic(path, data, mode=0o666): """Best-effort function to write data to a path atomically. Be prepared to handle a FileExistsError if concurrent writing of the temporary file is attempted.""" # id() is used to generate a pseudo-random filename. path_tmp = '{}.{}'.format(path, id(path)) fd = _os.open(path_tmp, _os.O_EXCL | _os.O_CREAT | _os.O_WRONLY, mode & 0o666) try: # We first write data to a temporary file, and then use os.replace() to # perform an atomic rename. 
with _io.FileIO(fd, 'wb') as file: file.write(data) _os.replace(path_tmp, path) except OSError: try: _os.unlink(path_tmp) except OSError: pass raise def _wrap(new, old): """Simple substitute for functools.update_wrapper.""" for replace in ['__module__', '__name__', '__qualname__', '__doc__']: if hasattr(old, replace): setattr(new, replace, getattr(old, replace)) new.__dict__.update(old.__dict__) def _new_module(name): return type(sys)(name) _code_type = type(_wrap.__code__) class _ManageReload: """Manages the possible clean-up of sys.modules for load_module().""" def __init__(self, name): self._name = name def __enter__(self): self._is_reload = self._name in sys.modules def __exit__(self, *args): if any(arg is not None for arg in args) and not self._is_reload: try: del sys.modules[self._name] except KeyError: pass # Module-level locking ######################################################## # A dict mapping module names to weakrefs of _ModuleLock instances _module_locks = {} # A dict mapping thread ids to _ModuleLock instances _blocking_on = {} class _DeadlockError(RuntimeError): pass class _ModuleLock: """A recursive lock implementation which is able to detect deadlocks (e.g. thread 1 trying to take locks A then B, and thread 2 trying to take locks B then A). """ def __init__(self, name): self.lock = _thread.allocate_lock() self.wakeup = _thread.allocate_lock() self.name = name self.owner = None self.count = 0 self.waiters = 0 def has_deadlock(self): # Deadlock avoidance for concurrent circular imports. me = _thread.get_ident() tid = self.owner while True: lock = _blocking_on.get(tid) if lock is None: return False tid = lock.owner if tid == me: return True def acquire(self): """ Acquire the module lock. If a potential deadlock is detected, a _DeadlockError is raised. Otherwise, the lock is always acquired and True is returned. 
""" tid = _thread.get_ident() _blocking_on[tid] = self try: while True: with self.lock: if self.count == 0 or self.owner == tid: self.owner = tid self.count += 1 return True if self.has_deadlock(): raise _DeadlockError('deadlock detected by %r' % self) if self.wakeup.acquire(False): self.waiters += 1 # Wait for a release() call self.wakeup.acquire() self.wakeup.release() finally: del _blocking_on[tid] def release(self): tid = _thread.get_ident() with self.lock: if self.owner != tid: raise RuntimeError('cannot release un-acquired lock') assert self.count > 0 self.count -= 1 if self.count == 0: self.owner = None if self.waiters: self.waiters -= 1 self.wakeup.release() def __repr__(self): return '_ModuleLock({!r}) at {}'.format(self.name, id(self)) class _DummyModuleLock: """A simple _ModuleLock equivalent for Python builds without multi-threading support.""" def __init__(self, name): self.name = name self.count = 0 def acquire(self): self.count += 1 return True def release(self): if self.count == 0: raise RuntimeError('cannot release un-acquired lock') self.count -= 1 def __repr__(self): return '_DummyModuleLock({!r}) at {}'.format(self.name, id(self)) class _ModuleLockManager: def __init__(self, name): self._name = name self._lock = None def __enter__(self): try: self._lock = _get_module_lock(self._name) finally: _imp.release_lock() self._lock.acquire() def __exit__(self, *args, **kwargs): self._lock.release() # The following two functions are for consumption by Python/import.c. def _get_module_lock(name): """Get or create the module lock for a given module name. 
Should only be called with the import lock taken.""" lock = None try: lock = _module_locks[name]() except KeyError: pass if lock is None: if _thread is None: lock = _DummyModuleLock(name) else: lock = _ModuleLock(name) def cb(_): del _module_locks[name] _module_locks[name] = _weakref.ref(lock, cb) return lock def _lock_unlock_module(name): """Release the global import lock, and acquires then release the module lock for a given module name. This is used to ensure a module is completely initialized, in the event it is being imported by another thread. Should only be called with the import lock taken.""" lock = _get_module_lock(name) _imp.release_lock() try: lock.acquire() except _DeadlockError: # Concurrent circular import, we'll accept a partially initialized # module object. pass else: lock.release() # Frame stripping magic ############################################### def _call_with_frames_removed(f, *args, **kwds): """remove_importlib_frames in import.c will always remove sequences of importlib frames that end with a call to this function Use it instead of a normal call in places where including the importlib frames introduces unwanted noise into the traceback (e.g. when executing module code) """ return f(*args, **kwds) # Finder/loader utility code ############################################### # Magic word to reject .pyc files generated by other Python versions. # It should change for each incompatible change to the bytecode. # # The value of CR and LF is incorporated so if you ever read or write # a .pyc file in text mode the magic number will be wrong; also, the # Apple MPW compiler swaps their values, botching string constants. # # The magic numbers must be spaced apart at least 2 values, as the # -U interpeter flag will cause MAGIC+1 being used. They have been # odd numbers for some time now. # # There were a variety of old schemes for setting the magic number. # The current working scheme is to increment the previous value by # 10. 
# # Starting with the adoption of PEP 3147 in Python 3.2, every bump in magic # number also includes a new "magic tag", i.e. a human readable string used # to represent the magic number in __pycache__ directories. When you change # the magic number, you must also set a new unique magic tag. Generally this # can be named after the Python major version of the magic number bump, but # it can really be anything, as long as it's different than anything else # that's come before. The tags are included in the following table, starting # with Python 3.2a0. # # Known values: # Python 1.5: 20121 # Python 1.5.1: 20121 # Python 1.5.2: 20121 # Python 1.6: 50428 # Python 2.0: 50823 # Python 2.0.1: 50823 # Python 2.1: 60202 # Python 2.1.1: 60202 # Python 2.1.2: 60202 # Python 2.2: 60717 # Python 2.3a0: 62011 # Python 2.3a0: 62021 # Python 2.3a0: 62011 (!) # Python 2.4a0: 62041 # Python 2.4a3: 62051 # Python 2.4b1: 62061 # Python 2.5a0: 62071 # Python 2.5a0: 62081 (ast-branch) # Python 2.5a0: 62091 (with) # Python 2.5a0: 62092 (changed WITH_CLEANUP opcode) # Python 2.5b3: 62101 (fix wrong code: for x, in ...) # Python 2.5b3: 62111 (fix wrong code: x += yield) # Python 2.5c1: 62121 (fix wrong lnotab with for loops and # storing constants that should have been removed) # Python 2.5c2: 62131 (fix wrong code: for x, in ... 
in listcomp/genexp) # Python 2.6a0: 62151 (peephole optimizations and STORE_MAP opcode) # Python 2.6a1: 62161 (WITH_CLEANUP optimization) # Python 2.7a0: 62171 (optimize list comprehensions/change LIST_APPEND) # Python 2.7a0: 62181 (optimize conditional branches: # introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE) # Python 2.7a0 62191 (introduce SETUP_WITH) # Python 2.7a0 62201 (introduce BUILD_SET) # Python 2.7a0 62211 (introduce MAP_ADD and SET_ADD) # Python 3000: 3000 # 3010 (removed UNARY_CONVERT) # 3020 (added BUILD_SET) # 3030 (added keyword-only parameters) # 3040 (added signature annotations) # 3050 (print becomes a function) # 3060 (PEP 3115 metaclass syntax) # 3061 (string literals become unicode) # 3071 (PEP 3109 raise changes) # 3081 (PEP 3137 make __file__ and __name__ unicode) # 3091 (kill str8 interning) # 3101 (merge from 2.6a0, see 62151) # 3103 (__file__ points to source file) # Python 3.0a4: 3111 (WITH_CLEANUP optimization). # Python 3.0a5: 3131 (lexical exception stacking, including POP_EXCEPT) # Python 3.1a0: 3141 (optimize list, set and dict comprehensions: # change LIST_APPEND and SET_ADD, add MAP_ADD) # Python 3.1a0: 3151 (optimize conditional branches: # introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE) # Python 3.2a0: 3160 (add SETUP_WITH) # tag: cpython-32 # Python 3.2a1: 3170 (add DUP_TOP_TWO, remove DUP_TOPX and ROT_FOUR) # tag: cpython-32 # Python 3.2a2 3180 (add DELETE_DEREF) # Python 3.3a0 3190 __class__ super closure changed # Python 3.3a0 3200 (__qualname__ added) # 3210 (added size modulo 2**32 to the pyc header) # Python 3.3a1 3220 (changed PEP 380 implementation) # Python 3.3a4 3230 (revert changes to implicit __class__ closure) # Python 3.4a1 3250 (evaluate positional default arguments before # keyword-only defaults) # Python 3.4a1 3260 (add LOAD_CLASSDEREF; allow locals of class to override # free vars) # Python 3.4a1 3270 (various tweaks to the __class__ closure) # Python 3.4a1 3280 (remove implicit class argument) # Python 
3.4a4 3290 (changes to __qualname__ computation) # Python 3.4a4 3300 (more changes to __qualname__ computation) # Python 3.4rc2 3310 (alter __qualname__ computation) # # MAGIC must change whenever the bytecode emitted by the compiler may no # longer be understood by older implementations of the eval loop (usually # due to the addition of new opcodes). MAGIC_NUMBER = (3310).to_bytes(2, 'little') + b'\r\n' _RAW_MAGIC_NUMBER = int.from_bytes(MAGIC_NUMBER, 'little') # For import.c _PYCACHE = '__pycache__' SOURCE_SUFFIXES = ['.py'] # _setup() adds .pyw as needed. DEBUG_BYTECODE_SUFFIXES = ['.pyc'] OPTIMIZED_BYTECODE_SUFFIXES = ['.pyo'] def cache_from_source(path, debug_override=None): """Given the path to a .py file, return the path to its .pyc/.pyo file. The .py file does not need to exist; this simply returns the path to the .pyc/.pyo file calculated as if the .py file were imported. The extension will be .pyc unless sys.flags.optimize is non-zero, then it will be .pyo. If debug_override is not None, then it must be a boolean and is used in place of sys.flags.optimize. If sys.implementation.cache_tag is None then NotImplementedError is raised. """ debug = not sys.flags.optimize if debug_override is None else debug_override if debug: suffixes = DEBUG_BYTECODE_SUFFIXES else: suffixes = OPTIMIZED_BYTECODE_SUFFIXES head, tail = _path_split(path) base_filename, sep, _ = tail.partition('.') tag = sys.implementation.cache_tag if tag is None: raise NotImplementedError('sys.implementation.cache_tag is None') filename = ''.join([base_filename, sep, tag, suffixes[0]]) return _path_join(head, _PYCACHE, filename) def source_from_cache(path): """Given the path to a .pyc./.pyo file, return the path to its .py file. The .pyc/.pyo file does not need to exist; this simply returns the path to the .py file calculated to correspond to the .pyc/.pyo file. If path does not conform to PEP 3147 format, ValueError will be raised. 
If sys.implementation.cache_tag is None then NotImplementedError is raised. """ if sys.implementation.cache_tag is None: raise NotImplementedError('sys.implementation.cache_tag is None') head, pycache_filename = _path_split(path) head, pycache = _path_split(head) if pycache != _PYCACHE: raise ValueError('{} not bottom-level directory in ' '{!r}'.format(_PYCACHE, path)) if pycache_filename.count('.') != 2: raise ValueError('expected only 2 dots in ' '{!r}'.format(pycache_filename)) base_filename = pycache_filename.partition('.')[0] return _path_join(head, base_filename + SOURCE_SUFFIXES[0]) def _get_sourcefile(bytecode_path): """Convert a bytecode file path to a source path (if possible). This function exists purely for backwards-compatibility for PyImport_ExecCodeModuleWithFilenames() in the C API. """ if len(bytecode_path) == 0: return None rest, _, extension = bytecode_path.rpartition('.') if not rest or extension.lower()[-3:-1] != 'py': return bytecode_path try: source_path = source_from_cache(bytecode_path) except (NotImplementedError, ValueError): source_path = bytecode_path[:-1] return source_path if _path_isfile(source_path) else bytecode_path def _calc_mode(path): """Calculate the mode permissions for a bytecode file.""" try: mode = _path_stat(path).st_mode except OSError: mode = 0o666 # We always ensure write access so we can update cached files # later even when the source files are read-only on Windows (#6074) mode |= 0o200 return mode def _verbose_message(message, *args, verbosity=1): """Print the message to stderr if -v/PYTHONVERBOSE is turned on.""" if sys.flags.verbose >= verbosity: if not message.startswith(('#', 'import ')): message = '# ' + message print(message.format(*args), file=sys.stderr) def _check_name(method): """Decorator to verify that the module being requested matches the one the loader can handle. The first argument (self) must define _name which the second argument is compared against. 
If the comparison fails then ImportError is raised. """ def _check_name_wrapper(self, name=None, *args, **kwargs): if name is None: name = self.name elif self.name != name: raise ImportError('loader cannot handle %s' % name, name=name) return method(self, name, *args, **kwargs) _wrap(_check_name_wrapper, method) return _check_name_wrapper def _requires_builtin(fxn): """Decorator to verify the named module is built-in.""" def _requires_builtin_wrapper(self, fullname): if fullname not in sys.builtin_module_names: raise ImportError('{!r} is not a built-in module'.format(fullname), name=fullname) return fxn(self, fullname) _wrap(_requires_builtin_wrapper, fxn) return _requires_builtin_wrapper def _requires_frozen(fxn): """Decorator to verify the named module is frozen.""" def _requires_frozen_wrapper(self, fullname): if not _imp.is_frozen(fullname): raise ImportError('{!r} is not a frozen module'.format(fullname), name=fullname) return fxn(self, fullname) _wrap(_requires_frozen_wrapper, fxn) return _requires_frozen_wrapper def _find_module_shim(self, fullname): """Try to find a loader for the specified module by delegating to self.find_loader(). This method is deprecated in favor of finder.find_spec(). """ # Call find_loader(). If it returns a string (indicating this # is a namespace package portion), generate a warning and # return None. loader, portions = self.find_loader(fullname) if loader is None and len(portions): msg = 'Not importing directory {}: missing __init__' _warnings.warn(msg.format(portions[0]), ImportWarning) return loader def _load_module_shim(self, fullname): """Load the specified module into sys.modules and return it. This method is deprecated. Use loader.exec_module instead. 
""" spec = spec_from_loader(fullname, self) methods = _SpecMethods(spec) if fullname in sys.modules: module = sys.modules[fullname] methods.exec(module) return sys.modules[fullname] else: return methods.load() def _validate_bytecode_header(data, source_stats=None, name=None, path=None): """Validate the header of the passed-in bytecode against source_stats (if given) and returning the bytecode that can be compiled by compile(). All other arguments are used to enhance error reporting. ImportError is raised when the magic number is incorrect or the bytecode is found to be stale. EOFError is raised when the data is found to be truncated. """ exc_details = {} if name is not None: exc_details['name'] = name else: # To prevent having to make all messages have a conditional name. name = '<bytecode>' if path is not None: exc_details['path'] = path magic = data[:4] raw_timestamp = data[4:8] raw_size = data[8:12] if magic != MAGIC_NUMBER: message = 'bad magic number in {!r}: {!r}'.format(name, magic) _verbose_message(message) raise ImportError(message, **exc_details) elif len(raw_timestamp) != 4: message = 'reached EOF while reading timestamp in {!r}'.format(name) _verbose_message(message) raise EOFError(message) elif len(raw_size) != 4: message = 'reached EOF while reading size of source in {!r}'.format(name) _verbose_message(message) raise EOFError(message) if source_stats is not None: try: source_mtime = int(source_stats['mtime']) except KeyError: pass else: if _r_long(raw_timestamp) != source_mtime: message = 'bytecode is stale for {!r}'.format(name) _verbose_message(message) raise ImportError(message, **exc_details) try: source_size = source_stats['size'] & 0xFFFFFFFF except KeyError: pass else: if _r_long(raw_size) != source_size: raise ImportError('bytecode is stale for {!r}'.format(name), **exc_details) return data[12:] def _compile_bytecode(data, name=None, bytecode_path=None, source_path=None): """Compile bytecode as returned by _validate_bytecode_header().""" code 
= marshal.loads(data) if isinstance(code, _code_type): _verbose_message('code object from {!r}', bytecode_path) if source_path is not None: _imp._fix_co_filename(code, source_path) return code else: raise ImportError('Non-code object in {!r}'.format(bytecode_path), name=name, path=bytecode_path) def _code_to_bytecode(code, mtime=0, source_size=0): """Compile a code object into bytecode for writing out to a byte-compiled file.""" data = bytearray(MAGIC_NUMBER) data.extend(_w_long(mtime)) data.extend(_w_long(source_size)) data.extend(marshal.dumps(code)) return data def decode_source(source_bytes): """Decode bytes representing source code and return the string. Universal newline support is used in the decoding. """ import tokenize # To avoid bootstrap issues. source_bytes_readline = _io.BytesIO(source_bytes).readline encoding = tokenize.detect_encoding(source_bytes_readline) newline_decoder = _io.IncrementalNewlineDecoder(None, True) return newline_decoder.decode(source_bytes.decode(encoding[0])) # Module specifications ####################################################### def _module_repr(module): # The implementation of ModuleType__repr__(). loader = getattr(module, '__loader__', None) if hasattr(loader, 'module_repr'): # As soon as BuiltinImporter, FrozenImporter, and NamespaceLoader # drop their implementations for module_repr. we can add a # deprecation warning here. try: return loader.module_repr(module) except Exception: pass try: spec = module.__spec__ except AttributeError: pass else: if spec is not None: return _SpecMethods(spec).module_repr() # We could use module.__class__.__name__ instead of 'module' in the # various repr permutations. try: name = module.__name__ except AttributeError: name = '?' 
try: filename = module.__file__ except AttributeError: if loader is None: return '<module {!r}>'.format(name) else: return '<module {!r} ({!r})>'.format(name, loader) else: return '<module {!r} from {!r}>'.format(name, filename) class _installed_safely: def __init__(self, module): self._module = module self._spec = module.__spec__ def __enter__(self): # This must be done before putting the module in sys.modules # (otherwise an optimization shortcut in import.c becomes # wrong) self._spec._initializing = True sys.modules[self._spec.name] = self._module def __exit__(self, *args): try: spec = self._spec if any(arg is not None for arg in args): try: del sys.modules[spec.name] except KeyError: pass else: _verbose_message('import {!r} # {!r}', spec.name, spec.loader) finally: self._spec._initializing = False class ModuleSpec: """The specification for a module, used for loading. A module's spec is the source for information about the module. For data associated with the module, including source, use the spec's loader. `name` is the absolute name of the module. `loader` is the loader to use when loading the module. `parent` is the name of the package the module is in. The parent is derived from the name. `is_package` determines if the module is considered a package or not. On modules this is reflected by the `__path__` attribute. `origin` is the specific location used by the loader from which to load the module, if that information is available. When filename is set, origin will match. `has_location` indicates that a spec's "origin" reflects a location. When this is True, `__file__` attribute of the module is set. `cached` is the location of the cached bytecode file, if any. It corresponds to the `__cached__` attribute. `submodule_search_locations` is the sequence of path entries to search when importing submodules. If set, is_package should be True--and False otherwise. Packages are simply modules that (may) have submodules. 
If a spec has a non-None value in `submodule_search_locations`, the import system will consider modules loaded from the spec as packages. Only finders (see importlib.abc.MetaPathFinder and importlib.abc.PathEntryFinder) should modify ModuleSpec instances. """ def __init__(self, name, loader, *, origin=None, loader_state=None, is_package=None): self.name = name self.loader = loader self.origin = origin self.loader_state = loader_state self.submodule_search_locations = [] if is_package else None # file-location attributes self._set_fileattr = False self._cached = None def __repr__(self): args = ['name={!r}'.format(self.name), 'loader={!r}'.format(self.loader)] if self.origin is not None: args.append('origin={!r}'.format(self.origin)) if self.submodule_search_locations is not None: args.append('submodule_search_locations={}' .format(self.submodule_search_locations)) return '{}({})'.format(self.__class__.__name__, ', '.join(args)) def __eq__(self, other): smsl = self.submodule_search_locations try: return (self.name == other.name and self.loader == other.loader and self.origin == other.origin and smsl == other.submodule_search_locations and self.cached == other.cached and self.has_location == other.has_location) except AttributeError: return False @property def cached(self): if self._cached is None: if self.origin is not None and self._set_fileattr: filename = self.origin if filename.endswith(tuple(SOURCE_SUFFIXES)): try: self._cached = cache_from_source(filename) except NotImplementedError: pass elif filename.endswith(tuple(BYTECODE_SUFFIXES)): self._cached = filename return self._cached @cached.setter def cached(self, cached): self._cached = cached @property def parent(self): """The name of the module's parent.""" if self.submodule_search_locations is None: return self.name.rpartition('.')[0] else: return self.name @property def has_location(self): return self._set_fileattr @has_location.setter def has_location(self, value): self._set_fileattr = bool(value) def 
spec_from_loader(name, loader, *, origin=None, is_package=None): """Return a module spec based on various loader methods.""" if hasattr(loader, 'get_filename'): if is_package is None: return spec_from_file_location(name, loader=loader) search = [] if is_package else None return spec_from_file_location(name, loader=loader, submodule_search_locations=search) if is_package is None: if hasattr(loader, 'is_package'): try: is_package = loader.is_package(name) except ImportError: is_package = None # aka, undefined else: # the default is_package = False return ModuleSpec(name, loader, origin=origin, is_package=is_package) _POPULATE = object() def spec_from_file_location(name, location=None, *, loader=None, submodule_search_locations=_POPULATE): """Return a module spec based on a file location. To indicate that the module is a package, set submodule_search_locations to a list of directory paths. An empty list is sufficient, though its not otherwise useful to the import system. The loader must take a spec as its only __init__() arg. """ if location is None: # The caller may simply want a partially populated location- # oriented spec. So we set the location to a bogus value and # fill in as much as we can. location = '<unknown>' if hasattr(loader, 'get_filename'): # ExecutionLoader try: location = loader.get_filename(name) except ImportError: pass # If the location is on the filesystem, but doesn't actually exist, # we could return None here, indicating that the location is not # valid. However, we don't have a good way of testing since an # indirect location (e.g. a zip file or URL) will look like a # non-existent file relative to the filesystem. spec = ModuleSpec(name, loader, origin=location) spec._set_fileattr = True # Pick a loader if one wasn't provided. 
if loader is None: for loader_class, suffixes in _get_supported_file_loaders(): if location.endswith(tuple(suffixes)): loader = loader_class(name, location) spec.loader = loader break else: return None # Set submodule_search_paths appropriately. if submodule_search_locations is _POPULATE: # Check the loader. if hasattr(loader, 'is_package'): try: is_package = loader.is_package(name) except ImportError: pass else: if is_package: spec.submodule_search_locations = [] else: spec.submodule_search_locations = submodule_search_locations if spec.submodule_search_locations == []: if location: dirname = _path_split(location)[0] spec.submodule_search_locations.append(dirname) return spec def _spec_from_module(module, loader=None, origin=None): # This function is meant for use in _setup(). try: spec = module.__spec__ except AttributeError: pass else: if spec is not None: return spec name = module.__name__ if loader is None: try: loader = module.__loader__ except AttributeError: # loader will stay None. pass try: location = module.__file__ except AttributeError: location = None if origin is None: if location is None: try: origin = loader._ORIGIN except AttributeError: origin = None else: origin = location try: cached = module.__cached__ except AttributeError: cached = None try: submodule_search_locations = list(module.__path__) except AttributeError: submodule_search_locations = None spec = ModuleSpec(name, loader, origin=origin) spec._set_fileattr = False if location is None else True spec.cached = cached spec.submodule_search_locations = submodule_search_locations return spec class _SpecMethods: """Convenience wrapper around spec objects to provide spec-specific methods.""" # The various spec_from_* functions could be made factory methods here. def __init__(self, spec): self.spec = spec def module_repr(self): """Return the repr to use for the module.""" # We mostly replicate _module_repr() using the spec attributes. spec = self.spec name = '?' 
if spec.name is None else spec.name if spec.origin is None: if spec.loader is None: return '<module {!r}>'.format(name) else: return '<module {!r} ({!r})>'.format(name, spec.loader) else: if spec.has_location: return '<module {!r} from {!r}>'.format(name, spec.origin) else: return '<module {!r} ({})>'.format(spec.name, spec.origin) def init_module_attrs(self, module, *, _override=False, _force_name=True): """Set the module's attributes. All missing import-related module attributes will be set. Here is how the spec attributes map onto the module: spec.name -> module.__name__ spec.loader -> module.__loader__ spec.parent -> module.__package__ spec -> module.__spec__ Optional: spec.origin -> module.__file__ (if spec.set_fileattr is true) spec.cached -> module.__cached__ (if __file__ also set) spec.submodule_search_locations -> module.__path__ (if set) """ spec = self.spec # The passed in module may be not support attribute assignment, # in which case we simply don't set the attributes. # __name__ if (_override or _force_name or getattr(module, '__name__', None) is None): try: module.__name__ = spec.name except AttributeError: pass # __loader__ if _override or getattr(module, '__loader__', None) is None: loader = spec.loader if loader is None: # A backward compatibility hack. 
if spec.submodule_search_locations is not None: loader = _NamespaceLoader.__new__(_NamespaceLoader) loader._path = spec.submodule_search_locations try: module.__loader__ = loader except AttributeError: pass # __package__ if _override or getattr(module, '__package__', None) is None: try: module.__package__ = spec.parent except AttributeError: pass # __spec__ try: module.__spec__ = spec except AttributeError: pass # __path__ if _override or getattr(module, '__path__', None) is None: if spec.submodule_search_locations is not None: try: module.__path__ = spec.submodule_search_locations except AttributeError: pass if spec.has_location: # __file__ if _override or getattr(module, '__file__', None) is None: try: module.__file__ = spec.origin except AttributeError: pass # __cached__ if _override or getattr(module, '__cached__', None) is None: if spec.cached is not None: try: module.__cached__ = spec.cached except AttributeError: pass def create(self): """Return a new module to be loaded. The import-related module attributes are also set with the appropriate values from the spec. """ spec = self.spec # Typically loaders will not implement create_module(). if hasattr(spec.loader, 'create_module'): # If create_module() returns `None` it means the default # module creation should be used. module = spec.loader.create_module(spec) else: module = None if module is None: # This must be done before open() is ever called as the 'io' # module implicitly imports 'locale' and would otherwise # trigger an infinite loop. module = _new_module(spec.name) self.init_module_attrs(module) return module def _exec(self, module): """Do everything necessary to execute the module. The namespace of `module` is used as the target of execution. This method uses the loader's `exec_module()` method. """ self.spec.loader.exec_module(module) # Used by importlib.reload() and _load_module_shim(). 
    def exec(self, module):
        """Execute the spec in an existing module's namespace.

        Used by importlib.reload() and _load_module_shim(); `module` must
        already be the object installed in sys.modules under the spec's name.

        """
        name = self.spec.name
        _imp.acquire_lock()
        with _ModuleLockManager(name):
            if sys.modules.get(name) is not module:
                # Reload contract: the exact module object must already be
                # present in sys.modules.
                msg = 'module {!r} not in sys.modules'.format(name)
                raise ImportError(msg, name=name)
            if self.spec.loader is None:
                if self.spec.submodule_search_locations is None:
                    raise ImportError('missing loader', name=self.spec.name)
                # Namespace package: nothing to execute, just refresh the
                # import-related attributes.
                self.init_module_attrs(module, _override=True)
                return module
            self.init_module_attrs(module, _override=True)
            if not hasattr(self.spec.loader, 'exec_module'):
                # (issue19713) Once BuiltinImporter and ExtensionFileLoader
                # have exec_module() implemented, we can add a deprecation
                # warning here.
                self.spec.loader.load_module(name)
            else:
                self._exec(module)
        # load_module() may have replaced the entry in sys.modules, so return
        # whatever is installed there now rather than `module` itself.
        return sys.modules[name]

    def _load_backward_compatible(self):
        # Drive a legacy loader that only implements load_module().
        # (issue19713) Once BuiltinImporter and ExtensionFileLoader
        # have exec_module() implemented, we can add a deprecation
        # warning here.
        spec = self.spec
        spec.loader.load_module(spec.name)
        # The module must be in sys.modules at this point!
        module = sys.modules[spec.name]
        # Legacy loaders may not set the import-related attributes; fill in
        # any that are missing, ignoring modules that reject attribute
        # assignment.
        if getattr(module, '__loader__', None) is None:
            try:
                module.__loader__ = spec.loader
            except AttributeError:
                pass
        if getattr(module, '__package__', None) is None:
            try:
                # Since module.__path__ may not line up with
                # spec.submodule_search_paths, we can't necessarily rely
                # on spec.parent here.
                module.__package__ = module.__name__
                if not hasattr(module, '__path__'):
                    module.__package__ = spec.name.rpartition('.')[0]
            except AttributeError:
                pass
        if getattr(module, '__spec__', None) is None:
            try:
                module.__spec__ = spec
            except AttributeError:
                pass
        return module

    def _load_unlocked(self):
        # A helper for direct use by the import system.
        if self.spec.loader is not None:
            # not a namespace package
            if not hasattr(self.spec.loader, 'exec_module'):
                # Loader only implements the legacy load_module() API.
                return self._load_backward_compatible()
        module = self.create()
        with _installed_safely(module):
            if self.spec.loader is None:
                if self.spec.submodule_search_locations is None:
                    raise ImportError('missing loader', name=self.spec.name)
                # A namespace package so do nothing.
            else:
                self._exec(module)
        # We don't ensure that the import-related module attributes get
        # set in the sys.modules replacement case.  Such modules are on
        # their own.
        return sys.modules[self.spec.name]

    # A method used during testing of _load_unlocked() and by
    # _load_module_shim().
    def load(self):
        """Return a new module object, loaded by the spec's loader.

        The module is not added to its parent.

        If a module is already in sys.modules, that existing module gets
        clobbered.

        """
        _imp.acquire_lock()
        with _ModuleLockManager(self.spec.name):
            return self._load_unlocked()


def _fix_up_module(ns, name, pathname, cpathname=None):
    # This function is used by PyImport_ExecCodeModuleObject().  It fills in
    # any missing loader/spec/file attributes in the namespace dict `ns`.
    loader = ns.get('__loader__')
    spec = ns.get('__spec__')
    if not loader:
        if spec:
            loader = spec.loader
        elif pathname == cpathname:
            # Source path equals the cached path: the "source" is really a
            # bytecode file.
            loader = SourcelessFileLoader(name, pathname)
        else:
            loader = SourceFileLoader(name, pathname)
    if not spec:
        spec = spec_from_file_location(name, pathname, loader=loader)
    try:
        ns['__spec__'] = spec
        ns['__loader__'] = loader
        ns['__file__'] = pathname
        ns['__cached__'] = cpathname
    except Exception:
        # Not important enough to report.
        pass


# Loaders #####################################################################

class BuiltinImporter:

    """Meta path import for built-in modules.

    All methods are either class or static methods to avoid the need to
    instantiate the class.

    """

    @staticmethod
    def module_repr(module):
        """Return repr for the module.

        The method is deprecated.  The import machinery does the job itself.
""" return '<module {!r} (built-in)>'.format(module.__name__) @classmethod def find_spec(cls, fullname, path=None, target=None): if path is not None: return None if _imp.is_builtin(fullname): return spec_from_loader(fullname, cls, origin='built-in') else: return None @classmethod def find_module(cls, fullname, path=None): """Find the built-in module. If 'path' is ever specified then the search is considered a failure. This method is deprecated. Use find_spec() instead. """ spec = cls.find_spec(fullname, path) return spec.loader if spec is not None else None @classmethod @_requires_builtin def load_module(cls, fullname): """Load a built-in module.""" # Once an exec_module() implementation is added we can also # add a deprecation warning here. with _ManageReload(fullname): module = _call_with_frames_removed(_imp.init_builtin, fullname) module.__loader__ = cls module.__package__ = '' return module @classmethod @_requires_builtin def get_code(cls, fullname): """Return None as built-in modules do not have code objects.""" return None @classmethod @_requires_builtin def get_source(cls, fullname): """Return None as built-in modules do not have source code.""" return None @classmethod @_requires_builtin def is_package(cls, fullname): """Return False as built-in modules are never packages.""" return False class FrozenImporter: """Meta path import for frozen modules. All methods are either class or static methods to avoid the need to instantiate the class. """ @staticmethod def module_repr(m): """Return repr for the module. The method is deprecated. The import machinery does the job itself. """ return '<module {!r} (frozen)>'.format(m.__name__) @classmethod def find_spec(cls, fullname, path=None, target=None): if _imp.is_frozen(fullname): return spec_from_loader(fullname, cls, origin='frozen') else: return None @classmethod def find_module(cls, fullname, path=None): """Find a frozen module. This method is deprecated. Use find_spec() instead. 
""" return cls if _imp.is_frozen(fullname) else None @staticmethod def exec_module(module): name = module.__spec__.name if not _imp.is_frozen(name): raise ImportError('{!r} is not a frozen module'.format(name), name=name) code = _call_with_frames_removed(_imp.get_frozen_object, name) exec(code, module.__dict__) @classmethod def load_module(cls, fullname): """Load a frozen module. This method is deprecated. Use exec_module() instead. """ return _load_module_shim(cls, fullname) @classmethod @_requires_frozen def get_code(cls, fullname): """Return the code object for the frozen module.""" return _imp.get_frozen_object(fullname) @classmethod @_requires_frozen def get_source(cls, fullname): """Return None as frozen modules do not have source code.""" return None @classmethod @_requires_frozen def is_package(cls, fullname): """Return True if the frozen module is a package.""" return _imp.is_frozen_package(fullname) class WindowsRegistryFinder: """Meta path finder for modules declared in the Windows registry.""" REGISTRY_KEY = ( 'Software\\Python\\PythonCore\\{sys_version}' '\\Modules\\{fullname}') REGISTRY_KEY_DEBUG = ( 'Software\\Python\\PythonCore\\{sys_version}' '\\Modules\\{fullname}\\Debug') DEBUG_BUILD = False # Changed in _setup() @classmethod def _open_registry(cls, key): try: return _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, key) except OSError: return _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, key) @classmethod def _search_registry(cls, fullname): if cls.DEBUG_BUILD: registry_key = cls.REGISTRY_KEY_DEBUG else: registry_key = cls.REGISTRY_KEY key = registry_key.format(fullname=fullname, sys_version=sys.version[:3]) try: with cls._open_registry(key) as hkey: filepath = _winreg.QueryValue(hkey, '') except OSError: return None return filepath @classmethod def find_spec(cls, fullname, path=None, target=None): filepath = cls._search_registry(fullname) if filepath is None: return None try: _path_stat(filepath) except OSError: return None for loader, suffixes in 
_get_supported_file_loaders(): if filepath.endswith(tuple(suffixes)): spec = spec_from_loader(fullname, loader(fullname, filepath), origin=filepath) return spec @classmethod def find_module(cls, fullname, path=None): """Find module named in the registry. This method is deprecated. Use exec_module() instead. """ spec = cls.find_spec(fullname, path) if spec is not None: return spec.loader else: return None class _LoaderBasics: """Base class of common code needed by both SourceLoader and SourcelessFileLoader.""" def is_package(self, fullname): """Concrete implementation of InspectLoader.is_package by checking if the path returned by get_filename has a filename of '__init__.py'.""" filename = _path_split(self.get_filename(fullname))[1] filename_base = filename.rsplit('.', 1)[0] tail_name = fullname.rpartition('.')[2] return filename_base == '__init__' and tail_name != '__init__' def exec_module(self, module): """Execute the module.""" code = self.get_code(module.__name__) if code is None: raise ImportError('cannot load module {!r} when get_code() ' 'returns None'.format(module.__name__)) _call_with_frames_removed(exec, code, module.__dict__) load_module = _load_module_shim class SourceLoader(_LoaderBasics): def path_mtime(self, path): """Optional method that returns the modification time (an int) for the specified path, where path is a str. Raises IOError when the path cannot be handled. """ raise IOError def path_stats(self, path): """Optional method returning a metadata dict for the specified path to by the path (str). Possible keys: - 'mtime' (mandatory) is the numeric timestamp of last source code modification; - 'size' (optional) is the size in bytes of the source code. Implementing this method allows the loader to read bytecode files. Raises IOError when the path cannot be handled. """ return {'mtime': self.path_mtime(path)} def _cache_bytecode(self, source_path, cache_path, data): """Optional method which writes data (bytes) to a file path (a str). 
Implementing this method allows for the writing of bytecode files. The source path is needed in order to correctly transfer permissions """ # For backwards compatibility, we delegate to set_data() return self.set_data(cache_path, data) def set_data(self, path, data): """Optional method which writes data (bytes) to a file path (a str). Implementing this method allows for the writing of bytecode files. """ def get_source(self, fullname): """Concrete implementation of InspectLoader.get_source.""" path = self.get_filename(fullname) try: source_bytes = self.get_data(path) except OSError as exc: raise ImportError('source not available through get_data()', name=fullname) from exc return decode_source(source_bytes) def source_to_code(self, data, path, *, _optimize=-1): """Return the code object compiled from source. The 'data' argument can be any object type that compile() supports. """ return _call_with_frames_removed(compile, data, path, 'exec', dont_inherit=True, optimize=_optimize) def get_code(self, fullname): """Concrete implementation of InspectLoader.get_code. Reading of bytecode requires path_stats to be implemented. To write bytecode, set_data must also be implemented. 
""" source_path = self.get_filename(fullname) source_mtime = None try: bytecode_path = cache_from_source(source_path) except NotImplementedError: bytecode_path = None else: try: st = self.path_stats(source_path) except IOError: pass else: source_mtime = int(st['mtime']) try: data = self.get_data(bytecode_path) except OSError: pass else: try: bytes_data = _validate_bytecode_header(data, source_stats=st, name=fullname, path=bytecode_path) except (ImportError, EOFError): pass else: _verbose_message('{} matches {}', bytecode_path, source_path) return _compile_bytecode(bytes_data, name=fullname, bytecode_path=bytecode_path, source_path=source_path) source_bytes = self.get_data(source_path) code_object = self.source_to_code(source_bytes, source_path) _verbose_message('code object from {}', source_path) if (not sys.dont_write_bytecode and bytecode_path is not None and source_mtime is not None): data = _code_to_bytecode(code_object, source_mtime, len(source_bytes)) try: self._cache_bytecode(source_path, bytecode_path, data) _verbose_message('wrote {!r}', bytecode_path) except NotImplementedError: pass return code_object class FileLoader: """Base file loader class which implements the loader protocol methods that require file system usage.""" def __init__(self, fullname, path): """Cache the module name and the path to the file found by the finder.""" self.name = fullname self.path = path def __eq__(self, other): return (self.__class__ == other.__class__ and self.__dict__ == other.__dict__) def __hash__(self): return hash(self.name) ^ hash(self.path) @_check_name def load_module(self, fullname): """Load a module from a file. This method is deprecated. Use exec_module() instead. """ # The only reason for this method is for the name check. 
# Issue #14857: Avoid the zero-argument form of super so the implementation # of that form can be updated without breaking the frozen module return super(FileLoader, self).load_module(fullname) @_check_name def get_filename(self, fullname): """Return the path to the source file as found by the finder.""" return self.path def get_data(self, path): """Return the data from path as raw bytes.""" with _io.FileIO(path, 'r') as file: return file.read() class SourceFileLoader(FileLoader, SourceLoader): """Concrete implementation of SourceLoader using the file system.""" def path_stats(self, path): """Return the metadata for the path.""" st = _path_stat(path) return {'mtime': st.st_mtime, 'size': st.st_size} def _cache_bytecode(self, source_path, bytecode_path, data): # Adapt between the two APIs mode = _calc_mode(source_path) return self.set_data(bytecode_path, data, _mode=mode) def set_data(self, path, data, *, _mode=0o666): """Write bytes data to a file.""" parent, filename = _path_split(path) path_parts = [] # Figure out what directories are missing. while parent and not _path_isdir(parent): parent, part = _path_split(parent) path_parts.append(part) # Create needed directories. for part in reversed(path_parts): parent = _path_join(parent, part) try: _os.mkdir(parent) except FileExistsError: # Probably another Python process already created the dir. continue except OSError as exc: # Could be a permission error, read-only filesystem: just forget # about writing the data. _verbose_message('could not create {!r}: {!r}', parent, exc) return try: _write_atomic(path, data, _mode) _verbose_message('created {!r}', path) except OSError as exc: # Same as above: just don't write the bytecode. 
_verbose_message('could not create {!r}: {!r}', path, exc) class SourcelessFileLoader(FileLoader, _LoaderBasics): """Loader which handles sourceless file imports.""" def get_code(self, fullname): path = self.get_filename(fullname) data = self.get_data(path) bytes_data = _validate_bytecode_header(data, name=fullname, path=path) return _compile_bytecode(bytes_data, name=fullname, bytecode_path=path) def get_source(self, fullname): """Return None as there is no source code.""" return None # Filled in by _setup(). EXTENSION_SUFFIXES = [] class ExtensionFileLoader: """Loader for extension modules. The constructor is designed to work with FileFinder. """ def __init__(self, name, path): self.name = name self.path = path def __eq__(self, other): return (self.__class__ == other.__class__ and self.__dict__ == other.__dict__) def __hash__(self): return hash(self.name) ^ hash(self.path) @_check_name def load_module(self, fullname): """Load an extension module.""" # Once an exec_module() implementation is added we can also # add a deprecation warning here. 
with _ManageReload(fullname): module = _call_with_frames_removed(_imp.load_dynamic, fullname, self.path) _verbose_message('extension module loaded from {!r}', self.path) is_package = self.is_package(fullname) if is_package and not hasattr(module, '__path__'): module.__path__ = [_path_split(self.path)[0]] module.__loader__ = self module.__package__ = module.__name__ if not is_package: module.__package__ = module.__package__.rpartition('.')[0] return module def is_package(self, fullname): """Return True if the extension module is a package.""" file_name = _path_split(self.path)[1] return any(file_name == '__init__' + suffix for suffix in EXTENSION_SUFFIXES) def get_code(self, fullname): """Return None as an extension module cannot create a code object.""" return None def get_source(self, fullname): """Return None as extension modules have no source code.""" return None @_check_name def get_filename(self, fullname): """Return the path to the source file as found by the finder.""" return self.path class _NamespacePath: """Represents a namespace package's path. It uses the module name to find its parent module, and from there it looks up the parent's __path__. When this changes, the module's own path is recomputed, using path_finder. For top-level modules, the parent module's path is sys.path.""" def __init__(self, name, path, path_finder): self._name = name self._path = path self._last_parent_path = tuple(self._get_parent_path()) self._path_finder = path_finder def _find_parent_path_names(self): """Returns a tuple of (parent-module-name, parent-path-attr-name)""" parent, dot, me = self._name.rpartition('.') if dot == '': # This is a top-level module. sys.path contains the parent path. return 'sys', 'path' # Not a top-level module. parent-module.__path__ contains the # parent path. 
return parent, '__path__' def _get_parent_path(self): parent_module_name, path_attr_name = self._find_parent_path_names() return getattr(sys.modules[parent_module_name], path_attr_name) def _recalculate(self): # If the parent's path has changed, recalculate _path parent_path = tuple(self._get_parent_path()) # Make a copy if parent_path != self._last_parent_path: spec = self._path_finder(self._name, parent_path) # Note that no changes are made if a loader is returned, but we # do remember the new parent path if spec is not None and spec.loader is None: if spec.submodule_search_locations: self._path = spec.submodule_search_locations self._last_parent_path = parent_path # Save the copy return self._path def __iter__(self): return iter(self._recalculate()) def __len__(self): return len(self._recalculate()) def __repr__(self): return '_NamespacePath({!r})'.format(self._path) def __contains__(self, item): return item in self._recalculate() def append(self, item): self._path.append(item) # We use this exclusively in init_module_attrs() for backward-compatibility. class _NamespaceLoader: def __init__(self, name, path, path_finder): self._path = _NamespacePath(name, path, path_finder) @classmethod def module_repr(cls, module): """Return repr for the module. The method is deprecated. The import machinery does the job itself. """ return '<module {!r} (namespace)>'.format(module.__name__) def is_package(self, fullname): return True def get_source(self, fullname): return '' def get_code(self, fullname): return compile('', '<string>', 'exec', dont_inherit=True) def exec_module(self, module): pass def load_module(self, fullname): """Load a namespace module. This method is deprecated. Use exec_module() instead. """ # The import system never calls this method. 
_verbose_message('namespace module loaded with path {!r}', self._path) return _load_module_shim(self, fullname) # Finders ##################################################################### class PathFinder: """Meta path finder for sys.path and package __path__ attributes.""" @classmethod def invalidate_caches(cls): """Call the invalidate_caches() method on all path entry finders stored in sys.path_importer_caches (where implemented).""" for finder in sys.path_importer_cache.values(): if hasattr(finder, 'invalidate_caches'): finder.invalidate_caches() @classmethod def _path_hooks(cls, path): """Search sequence of hooks for a finder for 'path'. If 'hooks' is false then use sys.path_hooks. """ if not sys.path_hooks: _warnings.warn('sys.path_hooks is empty', ImportWarning) for hook in sys.path_hooks: try: return hook(path) except ImportError: continue else: return None @classmethod def _path_importer_cache(cls, path): """Get the finder for the path entry from sys.path_importer_cache. If the path entry is not in the cache, find the appropriate finder and cache it. If no finder is available, store None. """ if path == '': path = _os.getcwd() try: finder = sys.path_importer_cache[path] except KeyError: finder = cls._path_hooks(path) sys.path_importer_cache[path] = finder return finder @classmethod def _legacy_get_spec(cls, fullname, finder): # This would be a good place for a DeprecationWarning if # we ended up going that route. 
if hasattr(finder, 'find_loader'): loader, portions = finder.find_loader(fullname) else: loader = finder.find_module(fullname) portions = [] if loader is not None: return spec_from_loader(fullname, loader) spec = ModuleSpec(fullname, None) spec.submodule_search_locations = portions return spec @classmethod def _get_spec(cls, fullname, path, target=None): """Find the loader or namespace_path for this module/package name.""" # If this ends up being a namespace package, namespace_path is # the list of paths that will become its __path__ namespace_path = [] for entry in path: if not isinstance(entry, (str, bytes)): continue finder = cls._path_importer_cache(entry) if finder is not None: if hasattr(finder, 'find_spec'): spec = finder.find_spec(fullname, target) else: spec = cls._legacy_get_spec(fullname, finder) if spec is None: continue if spec.loader is not None: return spec portions = spec.submodule_search_locations if portions is None: raise ImportError('spec missing loader') # This is possibly part of a namespace package. # Remember these path entries (if any) for when we # create a namespace package, and continue iterating # on path. namespace_path.extend(portions) else: spec = ModuleSpec(fullname, None) spec.submodule_search_locations = namespace_path return spec @classmethod def find_spec(cls, fullname, path=None, target=None): """find the module on sys.path or 'path' based on sys.path_hooks and sys.path_importer_cache.""" if path is None: path = sys.path spec = cls._get_spec(fullname, path, target) if spec is None: return None elif spec.loader is None: namespace_path = spec.submodule_search_locations if namespace_path: # We found at least one namespace path. Return a # spec which can create the namespace package. 
spec.origin = 'namespace' spec.submodule_search_locations = _NamespacePath(fullname, namespace_path, cls._get_spec) return spec else: return None else: return spec @classmethod def find_module(cls, fullname, path=None): """find the module on sys.path or 'path' based on sys.path_hooks and sys.path_importer_cache. This method is deprecated. Use find_spec() instead. """ spec = cls.find_spec(fullname, path) if spec is None: return None return spec.loader class FileFinder: """File-based finder. Interactions with the file system are cached for performance, being refreshed when the directory the finder is handling has been modified. """ def __init__(self, path, *loader_details): """Initialize with the path to search on and a variable number of 2-tuples containing the loader and the file suffixes the loader recognizes.""" loaders = [] for loader, suffixes in loader_details: loaders.extend((suffix, loader) for suffix in suffixes) self._loaders = loaders # Base (directory) path self.path = path or '.' self._path_mtime = -1 self._path_cache = set() self._relaxed_path_cache = set() def invalidate_caches(self): """Invalidate the directory mtime.""" self._path_mtime = -1 find_module = _find_module_shim def find_loader(self, fullname): """Try to find a loader for the specified module, or the namespace package portions. Returns (loader, list-of-portions). This method is deprecated. Use find_spec() instead. """ spec = self.find_spec(fullname) if spec is None: return None, [] return spec.loader, spec.submodule_search_locations or [] def _get_spec(self, loader_class, fullname, path, smsl, target): loader = loader_class(fullname, path) return spec_from_file_location(fullname, path, loader=loader, submodule_search_locations=smsl) def find_spec(self, fullname, target=None): """Try to find a loader for the specified module, or the namespace package portions. 
        Returns (loader, list-of-portions)."""
        is_namespace = False
        tail_module = fullname.rpartition('.')[2]
        try:
            # Directory mtime is used to detect when the cached listing
            # has gone stale.
            mtime = _path_stat(self.path or _os.getcwd()).st_mtime
        except OSError:
            mtime = -1
        if mtime != self._path_mtime:
            self._fill_cache()
            self._path_mtime = mtime
        # tail_module keeps the original casing, for __file__ and friends
        if _relax_case():
            cache = self._relaxed_path_cache
            cache_module = tail_module.lower()
        else:
            cache = self._path_cache
            cache_module = tail_module
        # Check if the module is the name of a directory (and thus a package).
        if cache_module in cache:
            base_path = _path_join(self.path, tail_module)
            for suffix, loader_class in self._loaders:
                init_filename = '__init__' + suffix
                full_path = _path_join(base_path, init_filename)
                if _path_isfile(full_path):
                    return self._get_spec(loader_class, fullname, full_path,
                                          [base_path], target)
            else:
                # If a namespace package, return the path if we don't
                # find a module in the next section.
                is_namespace = _path_isdir(base_path)
        # Check for a file w/ a proper suffix exists.
        for suffix, loader_class in self._loaders:
            full_path = _path_join(self.path, tail_module + suffix)
            _verbose_message('trying {}'.format(full_path), verbosity=2)
            if cache_module + suffix in cache:
                if _path_isfile(full_path):
                    return self._get_spec(loader_class, fullname, full_path,
                                          None, target)
        if is_namespace:
            # No regular module found but a matching directory exists:
            # produce a loader-less spec for a namespace package portion.
            _verbose_message('possible namespace for {}'.format(base_path))
            spec = ModuleSpec(fullname, None)
            spec.submodule_search_locations = [base_path]
            return spec
        return None

    def _fill_cache(self):
        """Fill the cache of potential modules and packages for this
        directory."""
        path = self.path
        try:
            contents = _os.listdir(path or _os.getcwd())
        except (FileNotFoundError, PermissionError, NotADirectoryError):
            # Directory has either been removed, turned into a file, or made
            # unreadable.
            contents = []
        # We store two cached versions, to handle runtime changes of the
        # PYTHONCASEOK environment variable.
if not sys.platform.startswith('win'): self._path_cache = set(contents) else: # Windows users can import modules with case-insensitive file # suffixes (for legacy reasons). Make the suffix lowercase here # so it's done once instead of for every import. This is safe as # the specified suffixes to check against are always specified in a # case-sensitive manner. lower_suffix_contents = set() for item in contents: name, dot, suffix = item.partition('.') if dot: new_name = '{}.{}'.format(name, suffix.lower()) else: new_name = name lower_suffix_contents.add(new_name) self._path_cache = lower_suffix_contents if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS): self._relaxed_path_cache = {fn.lower() for fn in contents} @classmethod def path_hook(cls, *loader_details): """A class method which returns a closure to use on sys.path_hook which will return an instance using the specified loaders and the path called on the closure. If the path called on the closure is not a directory, ImportError is raised. 
""" def path_hook_for_FileFinder(path): """Path hook for importlib.machinery.FileFinder.""" if not _path_isdir(path): raise ImportError('only directories are supported', path=path) return cls(path, *loader_details) return path_hook_for_FileFinder def __repr__(self): return 'FileFinder({!r})'.format(self.path) # Import itself ############################################################### class _ImportLockContext: """Context manager for the import lock.""" def __enter__(self): """Acquire the import lock.""" _imp.acquire_lock() def __exit__(self, exc_type, exc_value, exc_traceback): """Release the import lock regardless of any raised exceptions.""" _imp.release_lock() def _resolve_name(name, package, level): """Resolve a relative module name to an absolute one.""" bits = package.rsplit('.', level - 1) if len(bits) < level: raise ValueError('attempted relative import beyond top-level package') base = bits[0] return '{}.{}'.format(base, name) if name else base def _find_spec_legacy(finder, name, path): # This would be a good place for a DeprecationWarning if # we ended up going that route. loader = finder.find_module(name, path) if loader is None: return None return spec_from_loader(name, loader) def _find_spec(name, path, target=None): """Find a module's loader.""" if not sys.meta_path: _warnings.warn('sys.meta_path is empty', ImportWarning) # We check sys.modules here for the reload case. While a passed-in # target will usually indicate a reload there is no guarantee, whereas # sys.modules provides one. is_reload = name in sys.modules for finder in sys.meta_path: with _ImportLockContext(): try: find_spec = finder.find_spec except AttributeError: spec = _find_spec_legacy(finder, name, path) if spec is None: continue else: spec = find_spec(name, path, target) if spec is not None: # The parent import may have already imported this module. 
            if not is_reload and name in sys.modules:
                module = sys.modules[name]
                try:
                    __spec__ = module.__spec__
                except AttributeError:
                    # We use the found spec since that is the one that
                    # we would have used if the parent module hadn't
                    # beaten us to the punch.
                    return spec
                else:
                    # Prefer the spec already attached to the module, if any.
                    if __spec__ is None:
                        return spec
                    else:
                        return __spec__
            else:
                return spec
    else:
        return None


def _sanity_check(name, package, level):
    """Verify arguments are "sane"."""
    if not isinstance(name, str):
        raise TypeError('module name must be str, not {}'.format(type(name)))
    if level < 0:
        raise ValueError('level must be >= 0')
    if package:
        if not isinstance(package, str):
            raise TypeError('__package__ not set to a string')
        elif package not in sys.modules:
            msg = ('Parent module {!r} not loaded, cannot perform relative '
                   'import')
            raise SystemError(msg.format(package))
    if not name and level == 0:
        raise ValueError('Empty module name')


_ERR_MSG_PREFIX = 'No module named '
_ERR_MSG = _ERR_MSG_PREFIX + '{!r}'


def _find_and_load_unlocked(name, import_):
    # Import `name`, importing its parent package first if necessary, then
    # find a spec for it and load it.  See _find_and_load() for the locking
    # wrapper around this helper.
    path = None
    parent = name.rpartition('.')[0]
    if parent:
        if parent not in sys.modules:
            _call_with_frames_removed(import_, parent)
        # Crazy side-effects!
        if name in sys.modules:
            return sys.modules[name]
        parent_module = sys.modules[parent]
        try:
            path = parent_module.__path__
        except AttributeError:
            msg = (_ERR_MSG + '; {!r} is not a package').format(name, parent)
            raise ImportError(msg, name=name)
    spec = _find_spec(name, path)
    if spec is None:
        raise ImportError(_ERR_MSG.format(name), name=name)
    else:
        module = _SpecMethods(spec)._load_unlocked()
    if parent:
        # Set the module as an attribute on its parent.
            parent_module = sys.modules[parent]
            setattr(parent_module, name.rpartition('.')[2], module)
    return module


def _find_and_load(name, import_):
    """Find and load the module, and release the import lock."""
    with _ModuleLockManager(name):
        return _find_and_load_unlocked(name, import_)


def _gcd_import(name, package=None, level=0):
    """Import and return the module based on its name, the package the call is
    being made from, and the level adjustment.

    This function represents the greatest common denominator of functionality
    between import_module and __import__. This includes setting __package__ if
    the loader did not.

    """
    _sanity_check(name, package, level)
    if level > 0:
        # Turn the relative name into an absolute one before looking it up.
        name = _resolve_name(name, package, level)
    _imp.acquire_lock()
    if name not in sys.modules:
        return _find_and_load(name, _gcd_import)
    module = sys.modules[name]
    if module is None:
        # A None entry marks an import that is in the process of being
        # removed/aborted; treat it as a failed import.
        _imp.release_lock()
        message = ('import of {} halted; '
                   'None in sys.modules'.format(name))
        raise ImportError(message, name=name)
    _lock_unlock_module(name)
    return module


def _handle_fromlist(module, fromlist, import_):
    """Figure out what __import__ should return.

    The import_ parameter is a callable which takes the name of module to
    import. It is required to decouple the function from assuming importlib's
    import implementation is desired.

    """
    # The hell that is fromlist ...
    # If a package was imported, try to import stuff from fromlist.
    if hasattr(module, '__path__'):
        if '*' in fromlist:
            # Expand '*' using the package's __all__, if it defines one.
            fromlist = list(fromlist)
            fromlist.remove('*')
            if hasattr(module, '__all__'):
                fromlist.extend(module.__all__)
        for x in fromlist:
            if not hasattr(module, x):
                from_name = '{}.{}'.format(module.__name__, x)
                try:
                    _call_with_frames_removed(import_, from_name)
                except ImportError as exc:
                    # Backwards-compatibility dictates we ignore failed
                    # imports triggered by fromlist for modules that don't
                    # exist.
if str(exc).startswith(_ERR_MSG_PREFIX): if exc.name == from_name: continue raise return module def _calc___package__(globals): """Calculate what __package__ should be. __package__ is not guaranteed to be defined or could be set to None to represent that its proper value is unknown. """ package = globals.get('__package__') if package is None: package = globals['__name__'] if '__path__' not in globals: package = package.rpartition('.')[0] return package def _get_supported_file_loaders(): """Returns a list of file-based module loaders. Each item is a tuple (loader, suffixes). """ extensions = ExtensionFileLoader, _imp.extension_suffixes() source = SourceFileLoader, SOURCE_SUFFIXES bytecode = SourcelessFileLoader, BYTECODE_SUFFIXES return [extensions, source, bytecode] def __import__(name, globals=None, locals=None, fromlist=(), level=0): """Import a module. The 'globals' argument is used to infer where the import is occuring from to handle relative imports. The 'locals' argument is ignored. The 'fromlist' argument specifies what should exist as attributes on the module being imported (e.g. ``from module import <fromlist>``). The 'level' argument represents the package location to import from in a relative import (e.g. ``from ..pkg import mod`` would have a 'level' of 2). """ if level == 0: module = _gcd_import(name) else: globals_ = globals if globals is not None else {} package = _calc___package__(globals_) module = _gcd_import(name, package, level) if not fromlist: # Return up to the first dot in 'name'. This is complicated by the fact # that 'name' may be relative. if level == 0: return _gcd_import(name.partition('.')[0]) elif not name: return module else: # Figure out where to slice the module's name up to the first dot # in 'name'. cut_off = len(name) - len(name.partition('.')[0]) # Slice end needs to be positive to alleviate need to special-case # when ``'.' not in name``. 
return sys.modules[module.__name__[:len(module.__name__)-cut_off]] else: return _handle_fromlist(module, fromlist, _gcd_import) def _builtin_from_name(name): spec = BuiltinImporter.find_spec(name) if spec is None: raise ImportError('no built-in module named ' + name) methods = _SpecMethods(spec) return methods._load_unlocked() def _setup(sys_module, _imp_module): """Setup importlib by importing needed built-in modules and injecting them into the global namespace. As sys is needed for sys.modules access and _imp is needed to load built-in modules, those two modules must be explicitly passed in. """ global _imp, sys, BYTECODE_SUFFIXES _imp = _imp_module sys = sys_module if sys.flags.optimize: BYTECODE_SUFFIXES = OPTIMIZED_BYTECODE_SUFFIXES else: BYTECODE_SUFFIXES = DEBUG_BYTECODE_SUFFIXES # Set up the spec for existing builtin/frozen modules. module_type = type(sys) for name, module in sys.modules.items(): if isinstance(module, module_type): if name in sys.builtin_module_names: loader = BuiltinImporter elif _imp.is_frozen(name): loader = FrozenImporter else: continue spec = _spec_from_module(module, loader) methods = _SpecMethods(spec) methods.init_module_attrs(module) # Directly load built-in modules needed during bootstrap. self_module = sys.modules[__name__] for builtin_name in ('_io', '_warnings', 'builtins', 'marshal'): if builtin_name not in sys.modules: builtin_module = _builtin_from_name(builtin_name) else: builtin_module = sys.modules[builtin_name] setattr(self_module, builtin_name, builtin_module) # Directly load the os module (needed during bootstrap). 
os_details = ('posix', ['/']), ('nt', ['\\', '/']) for builtin_os, path_separators in os_details: # Assumption made in _path_join() assert all(len(sep) == 1 for sep in path_separators) path_sep = path_separators[0] if builtin_os in sys.modules: os_module = sys.modules[builtin_os] break else: try: os_module = _builtin_from_name(builtin_os) break except ImportError: continue else: raise ImportError('importlib requires posix or nt') setattr(self_module, '_os', os_module) setattr(self_module, 'path_sep', path_sep) setattr(self_module, 'path_separators', ''.join(path_separators)) # Directly load the _thread module (needed during bootstrap). try: thread_module = _builtin_from_name('_thread') except ImportError: # Python was built without threads thread_module = None setattr(self_module, '_thread', thread_module) # Directly load the _weakref module (needed during bootstrap). weakref_module = _builtin_from_name('_weakref') setattr(self_module, '_weakref', weakref_module) # Directly load the winreg module (needed during bootstrap). if builtin_os == 'nt': winreg_module = _builtin_from_name('winreg') setattr(self_module, '_winreg', winreg_module) # Constants setattr(self_module, '_relax_case', _make_relax_case()) EXTENSION_SUFFIXES.extend(_imp.extension_suffixes()) if builtin_os == 'nt': SOURCE_SUFFIXES.append('.pyw') if '_d.pyd' in EXTENSION_SUFFIXES: WindowsRegistryFinder.DEBUG_BUILD = True def _install(sys_module, _imp_module): """Install importlib as the implementation of import.""" _setup(sys_module, _imp_module) supported_loaders = _get_supported_file_loaders() sys.path_hooks.extend([FileFinder.path_hook(*supported_loaders)]) sys.meta_path.append(BuiltinImporter) sys.meta_path.append(FrozenImporter) if _os.__name__ == 'nt': sys.meta_path.append(WindowsRegistryFinder) sys.meta_path.append(PathFinder)
iulian787/spack
refs/heads/develop
var/spack/repos/builtin/packages/snap-korf/package.py
2
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) class SnapKorf(MakefilePackage): """SNAP is a general purpose gene finding program suitable for both eukaryotic and prokaryotic genomes.""" homepage = "http://korflab.ucdavis.edu/software.html" url = "http://korflab.ucdavis.edu/Software/snap-2013-11-29.tar.gz" version('2013-11-29', sha256='e2a236392d718376356fa743aa49a987aeacd660c6979cee67121e23aeffc66a') depends_on('perl', type=('build', 'run')) depends_on('boost') depends_on('sqlite') depends_on('sparsehash') conflicts('%gcc@5:', when='@2013-11-29') def install(self, spec, prefix): mkdirp(prefix.bin) progs = ['snap', 'fathom', 'forge', 'depend', 'exonpairs', 'hmm-info'] for p in progs: install(p, prefix.bin) install('*.pl', prefix.bin) install_tree('Zoe', prefix.Zoe) install_tree('HMM', prefix.HMM) install_tree('DNA', prefix.DNA) def setup_run_environment(self, env): env.set('ZOE', self.prefix) env.prepend_path('PATH', self.prefix)
greenaddress/pycoin
refs/heads/master
pycoin/convention/__init__.py
4
"""Conversion helpers between bitcoin amounts and integer satoshi counts."""

import decimal

# Number of satoshis in one bitcoin (10**8).  Built from an int rather than
# the float literal 1e8 so the Decimal value is exact by construction and
# cannot pick up binary floating point representation error.
SATOSHI_PER_COIN = decimal.Decimal(10 ** 8)
COIN_PER_SATOSHI = decimal.Decimal(1) / SATOSHI_PER_COIN


def satoshi_to_btc(satoshi_count):
    """Return *satoshi_count* (an integer number of satoshis) as a Decimal
    amount of BTC.

    Zero is returned directly as Decimal(0); other values are normalized
    to strip trailing zeros (e.g. 150000000 -> Decimal('1.5')).
    """
    if satoshi_count == 0:
        return decimal.Decimal(0)
    r = satoshi_count * COIN_PER_SATOSHI
    return r.normalize()


def btc_to_satoshi(btc):
    """Return the integer satoshi count for *btc*.

    *btc* may be anything Decimal accepts (str, int, Decimal); routing
    through Decimal keeps string inputs like "0.1" exact.
    """
    return int(decimal.Decimal(btc) * SATOSHI_PER_COIN)
uberamd/NGECore2
refs/heads/master
scripts/object/tangible/component/armor/armor_layer_heat.py
85615
import sys def setup(core, object): return
piers7/rpi_ws281x
refs/heads/master
python/setup.py
27
# Python wrapper for the rpi_ws281x library.
# Author: Tony DiCola (tony@tonydicola.com)
from ez_setup import use_setuptools
use_setuptools()

from setuptools import setup, find_packages, Extension

# The SWIG interface file is compiled and linked against the pre-built
# ws2811 C library that lives one directory up.
_ws281x_extension = Extension('_rpi_ws281x',
                              sources=['rpi_ws281x.i'],
                              library_dirs=['../.'],
                              libraries=['ws2811'])

setup(name='rpi_ws281x',
      version='1.0.0',
      author='Jeremy Garff',
      author_email='jer@jers.net',
      description='Userspace Raspberry Pi PWM library for WS281X LEDs.',
      license='MIT',
      url='https://github.com/jgarff/rpi_ws281x/',
      py_modules=['neopixel'],
      ext_modules=[_ws281x_extension])
eleweek/twitter_sentiment_analysis
refs/heads/master
run.py
1
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Command-line driver: trains or evaluates a tweet-sentiment feature model.
# NOTE(review): `inspect` and `random` are imported but unused in this file.
import sys
import models
import datasets
import inspect
import logging
import random

import numpy as np

from sklearn.neural_network import MLPClassifier

FORMAT = "%(asctime)s:%(levelname)s:%(name)s:%(message)s"
logging.basicConfig(level=logging.INFO, format=FORMAT)


def train_features_model(dataset_name, model_class_name, model_file_name, *args):
    """Train a feature model on the named dataset and save it to disk.

    NOTE(review): `find_model_class_by_name` and `load_dataset_by_name`
    are neither defined nor imported in this file -- presumably they were
    meant to come from `models` / `datasets`; as written this raises
    NameError at call time.  TODO confirm against the repository.
    """
    model_class = find_model_class_by_name(model_class_name)
    model = model_class.create_from_argv(*args)

    dataset_train, dataset_test = load_dataset_by_name(dataset_name)

    model.train(dataset_train)
    model.save(model_file_name)


def convert_dataset_to_features(tweets, model):
    """Return (features, labels) numpy arrays for *tweets* under *model*.

    Labels are the per-tweet polarity values (asserted to lie in
    [-1.0, 1.0]).  Feature extraction prefers the model's vectorised
    `batch_get_features` when available, falling back to one
    `get_features` call per tweet.
    """
    logging.debug("convert_dataset_to_features: number of features is {}".format(model.get_features_number()))
    labels = np.zeros(len(tweets))
    for i, tweet in enumerate(tweets):
        labels[i] = tweet.polarity
        assert tweet.polarity is not None and -1.0 <= tweet.polarity <= 1.0

    if hasattr(model, "batch_get_features"):
        arrays = model.batch_get_features(tweets)
        assert len(arrays) == len(tweets)
    else:
        arrays = np.zeros((len(tweets), model.get_features_number()))
        for i, tweet in enumerate(tweets):
            arrays[i] = model.get_features(tweet)

    return arrays, labels


def test_features_model(dataset_name, model_class_name, model_file_name, *args):
    """Load a saved feature model and score an MLP classifier on it.

    Neutral tweets are dropped from the test split only; unrated tweets
    (polarity is None) are dropped from both splits.  Prints NaN/Inf
    locations in the training features, then train and test accuracy.

    NOTE(review): same undefined-helper concern as train_features_model.
    """
    model_class = find_model_class_by_name(model_class_name)
    model = model_class.load(model_file_name)

    dataset_train, dataset_test = load_dataset_by_name(dataset_name)

    def remove_unrated(tweets):
        # Keep only tweets that actually carry a polarity label.
        return [tweet for tweet in tweets if tweet.polarity is not None]

    dataset_test = remove_unrated([tweet for tweet in dataset_test if not tweet.is_neutral()])
    dataset_train = remove_unrated(dataset_train)

    datasets.print_dataset_stats(dataset_train, "{}:train".format(dataset_name))
    datasets.print_dataset_stats(dataset_test, "{}:test".format(dataset_name))

    train_arrays, train_labels = convert_dataset_to_features(dataset_train, model)
    # Debug aid: surface any NaN/Inf cells before fitting.
    print(np.where(np.isnan(train_arrays)))
    print(np.where(np.isinf(train_arrays)))
    test_arrays, test_labels = convert_dataset_to_features(dataset_test, model)

    logging.info("Starting fitting")
    # clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(150, 10), random_state=1)
    clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(30, ), random_state=1)
    clf.fit(train_arrays, train_labels)
    logging.info("Done fitting")

    print(clf.score(train_arrays, train_labels))
    print(clf.score(test_arrays, test_labels))


if __name__ == "__main__":
    # Dispatch on the first CLI argument; remaining args are forwarded.
    action = sys.argv[1]
    if action == "train_features_model":
        train_features_model(*sys.argv[2:])
    elif action == "test_features_model":
        test_features_model(*sys.argv[2:])
    else:
        raise Exception("Unknown action {}".format(action))
ganeshnalawade/ansible
refs/heads/devel
test/lib/ansible_test/_data/sanity/code-smell/use-compat-six.py
68
#!/usr/bin/env python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import re
import sys

# Matches a (possibly indented) `import six` / `from six ...` statement.
SIX_IMPORT_RE = re.compile(r'((^\s*import\s+six\b)|(^\s*from\s+six\b))')


def main():
    """Flag direct `six` imports in the files named on argv (or stdin)."""
    paths = sys.argv[1:] or sys.stdin.read().splitlines()

    for path in paths:
        with open(path, 'r') as path_fd:
            for lineno, text in enumerate(path_fd, start=1):
                match = SIX_IMPORT_RE.search(text)

                if match:
                    print('%s:%d:%d: use `ansible.module_utils.six` instead of `six`' % (
                        path, lineno, match.start(1) + 1))


if __name__ == '__main__':
    main()
sinbazhou/odoo
refs/heads/8.0
addons/portal/tests/test_portal.py
198
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Business Applications
#    Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.addons.mail.tests.common import TestMail
from openerp.exceptions import AccessError
from openerp.osv.orm import except_orm
from openerp.tools.misc import mute_logger


class test_portal(TestMail):
    """Access-right and notification tests for portal users.

    Builds on TestMail fixtures (Pigs group, admin/raoul/bert users) and
    adds one portal user ("Chell") plus a portal-visible group
    ("PigsPortal").
    """

    @classmethod
    def setUpClass(cls):
        super(test_portal, cls).setUpClass()
        cr, uid = cls.cr, cls.uid

        # Find Portal group
        cls.group_portal_id = cls.env.ref('base.group_portal').id

        # Create Chell (portal user)
        cls.user_chell_id = cls.res_users.create(cr, uid, {
            'name': 'Chell Gladys',
            'login': 'chell',
            'email': 'chell@gladys.portal',
            'groups_id': [(6, 0, [cls.group_portal_id])]
        }, {'no_reset_password': True})
        cls.user_chell = cls.res_users.browse(cr, uid, cls.user_chell_id)
        cls.partner_chell_id = cls.user_chell.partner_id.id

        # Create a PigsPortal group
        cls.group_port_id = cls.mail_group.create(cr, uid,
            {'name': 'PigsPortal', 'public': 'groups', 'group_public_id': cls.group_portal_id},
            {'mail_create_nolog': True})

        # Set an email address for the user running the tests, used as Sender for outgoing mails
        cls.res_users.write(cr, uid, uid, {'email': 'test@localhost'})

    @mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
    def test_00_mail_access_rights(self):
        """ Test basic mail_message and mail_group access rights for portal users. """
        cr, uid = self.cr, self.uid
        mail_compose = self.registry('mail.compose.message')

        # Prepare group: Pigs and PigsPortal
        pigs_msg_id = self.mail_group.message_post(cr, uid, self.group_pigs_id, body='Message')
        port_msg_id = self.mail_group.message_post(cr, uid, self.group_port_id, body='Message')

        # Do: Chell browses Pigs -> ko, employee group
        chell_pigs = self.mail_group.browse(cr, self.user_chell_id, self.group_pigs_id)
        with self.assertRaises(except_orm):
            trigger_read = chell_pigs.name

        # Do: Chell posts a message on Pigs, crash because can not write on group or is not in the followers
        with self.assertRaises(AccessError):
            self.mail_group.message_post(cr, self.user_chell_id, self.group_pigs_id, body='Message')

        # Do: Chell is added into Pigs followers and browse it -> ok for messages, ko for partners (no read permission)
        self.mail_group.message_subscribe_users(cr, uid, [self.group_pigs_id], [self.user_chell_id])
        chell_pigs = self.mail_group.browse(cr, self.user_chell_id, self.group_pigs_id)
        trigger_read = chell_pigs.name
        for message in chell_pigs.message_ids:
            trigger_read = message.subject
        for partner in chell_pigs.message_follower_ids:
            if partner.id == self.partner_chell_id:
                # Chell can read her own partner record
                continue
            with self.assertRaises(except_orm):
                trigger_read = partner.name

        # Do: Chell comments Pigs, ok because he is now in the followers
        self.mail_group.message_post(cr, self.user_chell_id, self.group_pigs_id, body='I love Pigs')
        # Do: Chell creates a mail.compose.message record on Pigs, because he uses the wizard
        compose_id = mail_compose.create(cr, self.user_chell_id,
            {'subject': 'Subject', 'body': 'Body text', 'partner_ids': []},
            {'default_composition_mode': 'comment', 'default_model': 'mail.group', 'default_res_id': self.group_pigs_id})
        mail_compose.send_mail(cr, self.user_chell_id, [compose_id])

        # Do: Chell replies to a Pigs message using the composer
        compose_id = mail_compose.create(cr, self.user_chell_id,
            {'subject': 'Subject', 'body': 'Body text'},
            {'default_composition_mode': 'comment', 'default_parent_id': pigs_msg_id})
        mail_compose.send_mail(cr, self.user_chell_id, [compose_id])

        # Do: Chell browses PigsPortal -> ok because groups security, ko for partners (no read permission)
        chell_port = self.mail_group.browse(cr, self.user_chell_id, self.group_port_id)
        trigger_read = chell_port.name
        for message in chell_port.message_ids:
            trigger_read = message.subject
        for partner in chell_port.message_follower_ids:
            with self.assertRaises(except_orm):
                trigger_read = partner.name

    def test_10_mail_invite(self):
        """Invite a partner via the wizard; check followers, signup and mail."""
        cr, uid = self.cr, self.uid
        mail_invite = self.registry('mail.wizard.invite')
        base_url = self.registry('ir.config_parameter').get_param(cr, uid, 'web.base.url', default='')
        # Carine Poilvache, with email, should receive emails for comments and emails
        partner_carine_id = self.res_partner.create(cr, uid, {'name': 'Carine Poilvache', 'email': 'c@c'})

        # Do: create a mail_wizard_invite, validate it
        self._init_mock_build_email()
        context = {'default_res_model': 'mail.group', 'default_res_id': self.group_pigs_id}
        mail_invite_id = mail_invite.create(cr, uid, {'partner_ids': [(4, partner_carine_id)], 'send_mail': True}, context)
        mail_invite.add_followers(cr, uid, [mail_invite_id])

        # Test: Pigs followers should contain Admin and Bert
        # NOTE(review): the assertion actually checks Admin + Carine; the
        # "Bert" wording in these messages looks like copy/paste residue.
        group_pigs = self.mail_group.browse(cr, uid, self.group_pigs_id)
        follower_ids = [follower.id for follower in group_pigs.message_follower_ids]
        self.assertEqual(set(follower_ids), set([self.partner_admin_id, partner_carine_id]),
                         'Pigs followers after invite is incorrect')

        # Test: partner must have been prepared for signup
        partner_carine = self.res_partner.browse(cr, uid, partner_carine_id)
        self.assertTrue(partner_carine.signup_valid, 'partner has not been prepared for signup')
        self.assertTrue(base_url in partner_carine.signup_url, 'signup url is incorrect')
        self.assertTrue(cr.dbname in partner_carine.signup_url, 'signup url is incorrect')
        self.assertTrue(partner_carine.signup_token in partner_carine.signup_url, 'signup url is incorrect')

        # Test: (pretend to) send email and check subject, body
        self.assertEqual(len(self._build_email_kwargs_list), 1, 'sent email number incorrect, should be only for Bert')
        for sent_email in self._build_email_kwargs_list:
            self.assertEqual(sent_email.get('subject'), 'Invitation to follow Discussion group: Pigs',
                             'invite: subject of invitation email is incorrect')
            self.assertIn('Administrator invited you to follow Discussion group document: Pigs', sent_email.get('body'),
                          'invite: body of invitation email is incorrect')
            self.assertIn(partner_carine.signup_token, sent_email.get('body'),
                          'invite: body of invitation email does not contain signup token')

    def test_20_notification_url(self):
        """ Tests designed to test the URL added in notification emails. """
        cr, uid, group_pigs = self.cr, self.uid, self.group_pigs

        # Partner data
        partner_raoul = self.res_partner.browse(cr, uid, self.partner_raoul_id)
        partner_bert_id = self.res_partner.create(cr, uid, {'name': 'bert'})
        partner_bert = self.res_partner.browse(cr, uid, partner_bert_id)
        # Mail data
        mail_mail_id = self.mail_mail.create(cr, uid, {'state': 'exception'})
        mail = self.mail_mail.browse(cr, uid, mail_mail_id)

        # Test: link for nobody -> None
        url = self.mail_mail._get_partner_access_link(cr, uid, mail)
        self.assertEqual(url, None,
                         'notification email: mails not send to a specific partner should not have any URL')

        # Test: link for partner -> signup URL
        url = self.mail_mail._get_partner_access_link(cr, uid, mail, partner=partner_bert)
        self.assertIn(partner_bert.signup_token, url,
                      'notification email: mails send to a not-user partner should contain the signup token')

        # Test: link for user -> signin
        url = self.mail_mail._get_partner_access_link(cr, uid, mail, partner=partner_raoul)
        self.assertIn('action=mail.action_mail_redirect', url,
                      'notification email: link should contain the redirect action')
        self.assertIn('login=%s' % partner_raoul.user_ids[0].login, url,
                      'notification email: link should contain the user login')

    @mute_logger('openerp.addons.mail.mail_thread', 'openerp.models')
    def test_21_inbox_redirection(self):
        """ Tests designed to test the inbox redirection of emails notification URLs. """
        cr, uid, user_admin, group_pigs = self.cr, self.uid, self.user_admin, self.group_pigs
        model, act_id = self.ir_model_data.get_object_reference(cr, uid, 'mail', 'action_mail_inbox_feeds')
        model, port_act_id = self.ir_model_data.get_object_reference(cr, uid, 'portal', 'action_mail_inbox_feeds_portal')
        # Data: post a message on pigs
        msg_id = self.group_pigs.message_post(body='My body', partner_ids=[self.partner_bert_id, self.partner_chell_id], type='comment', subtype='mail.mt_comment')

        # No specific parameters -> should redirect to Inbox
        action = self.mail_thread.message_redirect_action(cr, self.user_raoul_id, {'params': {}})
        self.assertEqual(action.get('type'), 'ir.actions.client',
                         'URL redirection: action without parameters should redirect to client action Inbox')
        self.assertEqual(action.get('id'), act_id,
                         'URL redirection: action without parameters should redirect to client action Inbox')

        # Bert has read access to Pigs -> should redirect to form view of Pigs
        # NOTE(review): this call runs as raoul, not bert, despite the comment.
        action = self.mail_thread.message_redirect_action(cr, self.user_raoul_id, {'params': {'message_id': msg_id}})
        self.assertEqual(action.get('type'), 'ir.actions.act_window',
                         'URL redirection: action with message_id for read-accredited user should redirect to Pigs')
        self.assertEqual(action.get('res_id'), group_pigs.id,
                         'URL redirection: action with message_id for read-accredited user should redirect to Pigs')

        # Bert has no read access to Pigs -> should redirect to Inbox
        action = self.mail_thread.message_redirect_action(cr, self.user_bert_id, {'params': {'message_id': msg_id}})
        self.assertEqual(action.get('type'), 'ir.actions.client',
                         'URL redirection: action without parameters should redirect to client action Inbox')
        self.assertEqual(action.get('id'), act_id,
                         'URL redirection: action without parameters should redirect to client action Inbox')

        # Chell has no read access to pigs -> should redirect to Portal Inbox
        action = self.mail_thread.message_redirect_action(cr, self.user_chell_id, {'params': {'message_id': msg_id}})
        self.assertEqual(action.get('type'), 'ir.actions.client',
                         'URL redirection: action without parameters should redirect to client action Inbox')
        self.assertEqual(action.get('id'), port_act_id,
                         'URL redirection: action without parameters should redirect to client action Inbox')

    def test_30_message_read(self):
        """Portal user must only see messages carrying a real subtype."""
        cr, uid, group_port_id = self.cr, self.uid, self.group_port_id

        # Data: custom subtypes
        mt_group_public_id = self.mail_message_subtype.create(cr, uid, {'name': 'group_public', 'description': 'Group changed'})
        self.ir_model_data.create(cr, uid, {'name': 'mt_group_public', 'model': 'mail.message.subtype', 'module': 'mail', 'res_id': mt_group_public_id})
        # Data: post messages with various subtypes
        msg1_id = self.mail_group.message_post(cr, uid, group_port_id, body='Body1', type='comment', subtype='mail.mt_comment')
        msg2_id = self.mail_group.message_post(cr, uid, group_port_id, body='Body2', type='comment', subtype='mail.mt_group_public')
        msg3_id = self.mail_group.message_post(cr, uid, group_port_id, body='Body3', type='comment', subtype='mail.mt_comment')
        msg4_id = self.mail_group.message_post(cr, uid, group_port_id, body='Body4', type='comment')
        # msg5_id = self.mail_group.message_post(cr, uid, group_port_id, body='Body5', type='notification')

        # Do: Chell search messages: should not see internal notes (comment without subtype)
        msg_ids = self.mail_message.search(cr, self.user_chell_id, [('model', '=', 'mail.group'), ('res_id', '=', group_port_id)])
        self.assertEqual(set(msg_ids), set([msg1_id, msg2_id, msg3_id]),
                         'mail_message: portal user has access to messages he should not read')

        # Do: Chell read messages she can read
        self.mail_message.read(cr, self.user_chell_id, msg_ids, ['body', 'type', 'subtype_id'])

        # Do: Chell read a message she should not be able to read
        with self.assertRaises(except_orm):
            self.mail_message.read(cr, self.user_chell_id, [msg4_id], ['body', 'type', 'subtype_id'])
billwanjohi/ansible-modules-core
refs/heads/devel
cloud/quantum_router_interface.py
47
#!/usr/bin/python #coding: utf-8 -*- # (c) 2013, Benno Joy <benno@ansible.com> # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. try: try: from neutronclient.neutron import client except ImportError: from quantumclient.quantum import client from keystoneclient.v2_0 import client as ksclient except ImportError: print("failed=True msg='quantumclient (or neutronclient) and keystone client are required'") DOCUMENTATION = ''' --- module: quantum_router_interface version_added: "1.2" short_description: Attach/Dettach a subnet's interface to a router description: - Attach/Dettach a subnet interface to a router, to provide a gateway for the subnet. options: login_username: description: - login username to authenticate to keystone required: true default: admin login_password: description: - Password of login user required: true default: 'yes' login_tenant_name: description: - The tenant name of the login user required: true default: 'yes' auth_url: description: - The keystone URL for authentication required: false default: 'http://127.0.0.1:35357/v2.0/' region_name: description: - Name of the region required: false default: None state: description: - Indicate desired state of the resource choices: ['present', 'absent'] default: present router_name: description: - Name of the router to which the subnet's interface should be attached. 
required: true default: None subnet_name: description: - Name of the subnet to whose interface should be attached to the router. required: true default: None tenant_name: description: - Name of the tenant whose subnet has to be attached. required: false default: None requirements: ["quantumclient", "keystoneclient"] ''' EXAMPLES = ''' # Attach tenant1's subnet to the external router - quantum_router_interface: state=present login_username=admin login_password=admin login_tenant_name=admin tenant_name=tenant1 router_name=external_route subnet_name=t1subnet ''' _os_keystone = None _os_tenant_id = None def _get_ksclient(module, kwargs): try: kclient = ksclient.Client(username=kwargs.get('login_username'), password=kwargs.get('login_password'), tenant_name=kwargs.get('login_tenant_name'), auth_url=kwargs.get('auth_url')) except Exception, e: module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message) global _os_keystone _os_keystone = kclient return kclient def _get_endpoint(module, ksclient): try: endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') except Exception, e: module.fail_json(msg = "Error getting network endpoint: %s" % e.message) return endpoint def _get_neutron_client(module, kwargs): _ksclient = _get_ksclient(module, kwargs) token = _ksclient.auth_token endpoint = _get_endpoint(module, _ksclient) kwargs = { 'token': token, 'endpoint_url': endpoint } try: neutron = client.Client('2.0', **kwargs) except Exception, e: module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) return neutron def _set_tenant_id(module): global _os_tenant_id if not module.params['tenant_name']: login_tenant_name = module.params['login_tenant_name'] else: login_tenant_name = module.params['tenant_name'] for tenant in _os_keystone.tenants.list(): if tenant.name == login_tenant_name: _os_tenant_id = tenant.id break if not _os_tenant_id: module.fail_json(msg = "The tenant id cannot be found, please check 
the parameters") def _get_router_id(module, neutron): kwargs = { 'name': module.params['router_name'], } try: routers = neutron.list_routers(**kwargs) except Exception, e: module.fail_json(msg = "Error in getting the router list: %s " % e.message) if not routers['routers']: return None return routers['routers'][0]['id'] def _get_subnet_id(module, neutron): subnet_id = None kwargs = { 'tenant_id': _os_tenant_id, 'name': module.params['subnet_name'], } try: subnets = neutron.list_subnets(**kwargs) except Exception, e: module.fail_json( msg = " Error in getting the subnet list:%s " % e.message) if not subnets['subnets']: return None return subnets['subnets'][0]['id'] def _get_port_id(neutron, module, router_id, subnet_id): kwargs = { 'tenant_id': _os_tenant_id, 'device_id': router_id, } try: ports = neutron.list_ports(**kwargs) except Exception, e: module.fail_json( msg = "Error in listing ports: %s" % e.message) if not ports['ports']: return None for port in ports['ports']: for subnet in port['fixed_ips']: if subnet['subnet_id'] == subnet_id: return port['id'] return None def _add_interface_router(neutron, module, router_id, subnet_id): kwargs = { 'subnet_id': subnet_id } try: neutron.add_interface_router(router_id, kwargs) except Exception, e: module.fail_json(msg = "Error in adding interface to router: %s" % e.message) return True def _remove_interface_router(neutron, module, router_id, subnet_id): kwargs = { 'subnet_id': subnet_id } try: neutron.remove_interface_router(router_id, kwargs) except Exception, e: module.fail_json(msg="Error in removing interface from router: %s" % e.message) return True def main(): argument_spec = openstack_argument_spec() argument_spec.update(dict( router_name = dict(required=True), subnet_name = dict(required=True), tenant_name = dict(default=None), state = dict(default='present', choices=['absent', 'present']), )) module = AnsibleModule(argument_spec=argument_spec) neutron = _get_neutron_client(module, module.params) 
_set_tenant_id(module) router_id = _get_router_id(module, neutron) if not router_id: module.fail_json(msg="failed to get the router id, please check the router name") subnet_id = _get_subnet_id(module, neutron) if not subnet_id: module.fail_json(msg="failed to get the subnet id, please check the subnet name") if module.params['state'] == 'present': port_id = _get_port_id(neutron, module, router_id, subnet_id) if not port_id: _add_interface_router(neutron, module, router_id, subnet_id) module.exit_json(changed=True, result="created", id=port_id) module.exit_json(changed=False, result="success", id=port_id) if module.params['state'] == 'absent': port_id = _get_port_id(neutron, module, router_id, subnet_id) if not port_id: module.exit_json(changed = False, result = "Success") _remove_interface_router(neutron, module, router_id, subnet_id) module.exit_json(changed=True, result="Deleted") # this is magic, see lib/ansible/module.params['common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * main()
piotroxp/scibibscan
refs/heads/master
scib/lib/python3.5/site-packages/wheel/util.py
219
"""Utility functions.""" import sys import os import base64 import json import hashlib __all__ = ['urlsafe_b64encode', 'urlsafe_b64decode', 'utf8', 'to_json', 'from_json', 'matches_requirement'] def urlsafe_b64encode(data): """urlsafe_b64encode without padding""" return base64.urlsafe_b64encode(data).rstrip(binary('=')) def urlsafe_b64decode(data): """urlsafe_b64decode without padding""" pad = b'=' * (4 - (len(data) & 3)) return base64.urlsafe_b64decode(data + pad) def to_json(o): '''Convert given data to JSON.''' return json.dumps(o, sort_keys=True) def from_json(j): '''Decode a JSON payload.''' return json.loads(j) def open_for_csv(name, mode): if sys.version_info[0] < 3: nl = {} bin = 'b' else: nl = { 'newline': '' } bin = '' return open(name, mode + bin, **nl) try: unicode def utf8(data): '''Utf-8 encode data.''' if isinstance(data, unicode): return data.encode('utf-8') return data except NameError: def utf8(data): '''Utf-8 encode data.''' if isinstance(data, str): return data.encode('utf-8') return data try: # For encoding ascii back and forth between bytestrings, as is repeatedly # necessary in JSON-based crypto under Python 3 unicode def native(s): return s def binary(s): if isinstance(s, unicode): return s.encode('ascii') return s except NameError: def native(s): if isinstance(s, bytes): return s.decode('ascii') return s def binary(s): if isinstance(s, str): return s.encode('ascii') class HashingFile(object): def __init__(self, fd, hashtype='sha256'): self.fd = fd self.hashtype = hashtype self.hash = hashlib.new(hashtype) self.length = 0 def write(self, data): self.hash.update(data) self.length += len(data) self.fd.write(data) def close(self): self.fd.close() def digest(self): if self.hashtype == 'md5': return self.hash.hexdigest() digest = self.hash.digest() return self.hashtype + '=' + native(urlsafe_b64encode(digest)) if sys.platform == 'win32': import ctypes.wintypes # CSIDL_APPDATA for reference - not used here for compatibility with # dirspec, which 
uses LOCAL_APPDATA and COMMON_APPDATA in that order csidl = dict(CSIDL_APPDATA=26, CSIDL_LOCAL_APPDATA=28, CSIDL_COMMON_APPDATA=35) def get_path(name): SHGFP_TYPE_CURRENT = 0 buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH) ctypes.windll.shell32.SHGetFolderPathW(0, csidl[name], 0, SHGFP_TYPE_CURRENT, buf) return buf.value def save_config_path(*resource): appdata = get_path("CSIDL_LOCAL_APPDATA") path = os.path.join(appdata, *resource) if not os.path.isdir(path): os.makedirs(path) return path def load_config_paths(*resource): ids = ["CSIDL_LOCAL_APPDATA", "CSIDL_COMMON_APPDATA"] for id in ids: base = get_path(id) path = os.path.join(base, *resource) if os.path.exists(path): yield path else: def save_config_path(*resource): import xdg.BaseDirectory return xdg.BaseDirectory.save_config_path(*resource) def load_config_paths(*resource): import xdg.BaseDirectory return xdg.BaseDirectory.load_config_paths(*resource) def matches_requirement(req, wheels): """List of wheels matching a requirement. :param req: The requirement to satisfy :param wheels: List of wheels to search. """ try: from pkg_resources import Distribution, Requirement except ImportError: raise RuntimeError("Cannot use requirements without pkg_resources") req = Requirement.parse(req) selected = [] for wf in wheels: f = wf.parsed_filename dist = Distribution(project_name=f.group("name"), version=f.group("ver")) if dist in req: selected.append(wf) return selected
mrkm4ntr/incubator-airflow
refs/heads/master
airflow/providers/docker/__init__.py
5130
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License.
zsoltika/rdbms-subsetter
refs/heads/master
setup.py
8
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging script for rdbms-subsetter."""

import os
import sys

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Shortcut: "python setup.py publish" builds an sdist and uploads it.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()

curdir = os.path.dirname(os.path.realpath(__file__))
# Use the README as the long description; use a context manager so the
# file handle is closed (the original leaked it).
with open(os.path.join(curdir, 'README.rst')) as readme_file:
    readme = readme_file.read()

setup(
    name='rdbms-subsetter',
    version='0.2.1',
    description='Generate consistent subset of an RDBMS',
    long_description=readme,
    author='Catherine Devlin',
    author_email='catherine.devlin@gsa.gov',
    # BUG FIX: the original value was two URLs pasted together
    # ('https://github.com/18f/https://github.com/18F/rdbms-subsetter').
    url='https://github.com/18F/rdbms-subsetter',
    install_requires=[
        "sqlalchemy",
    ],
    license="CC0",
    keywords='database testing',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication',
        'Natural Language :: English',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Database',
        'Topic :: Software Development :: Testing',
    ],
    py_modules=['subsetter'],
    entry_points={
        'console_scripts': [
            'rdbms-subsetter = subsetter:generate',
        ]
    },
)
caphrim007/ansible
refs/heads/devel
test/units/module_utils/basic/test_deprecate_warn.py
127
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

import json

import pytest


@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_warn(am, capfd):
    # A warning queued via warn() must be merged with warnings handed
    # directly to exit_json() in the emitted JSON payload.
    am.warn('warning1')

    with pytest.raises(SystemExit):
        am.exit_json(warnings=['warning2'])

    stdout, _stderr = capfd.readouterr()
    payload = json.loads(stdout)
    assert payload['warnings'] == ['warning1', 'warning2']


@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_deprecate(am, capfd):
    # Deprecations carry an optional version, whether recorded via
    # deprecate() or passed through exit_json(deprecations=...).
    am.deprecate('deprecation1')
    am.deprecate('deprecation2', '2.3')

    with pytest.raises(SystemExit):
        am.exit_json(deprecations=['deprecation3', ('deprecation4', '2.4')])

    stdout, _stderr = capfd.readouterr()
    payload = json.loads(stdout)
    assert 'warnings' not in payload or payload['warnings'] == []
    expected = [
        {u'msg': u'deprecation1', u'version': None},
        {u'msg': u'deprecation2', u'version': '2.3'},
        {u'msg': u'deprecation3', u'version': None},
        {u'msg': u'deprecation4', u'version': '2.4'},
    ]
    assert payload['deprecations'] == expected


@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_deprecate_without_list(am, capfd):
    # A bare string (not a list) is also accepted for deprecations.
    with pytest.raises(SystemExit):
        am.exit_json(deprecations='Simple deprecation warning')

    stdout, _stderr = capfd.readouterr()
    payload = json.loads(stdout)
    assert 'warnings' not in payload or payload['warnings'] == []
    assert payload['deprecations'] == [
        {u'msg': u'Simple deprecation warning', u'version': None},
    ]
GabrielNicolasAvellaneda/kafka
refs/heads/trunk
system_test/mirror_maker_testsuite/mirror_maker_test.py
70
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

#!/usr/bin/env python

# ===================================
# mirror_maker_test.py
# ===================================

import inspect
import logging
import os
import signal
import subprocess
import sys
import time
import traceback

from system_test_env import SystemTestEnv
sys.path.append(SystemTestEnv.SYSTEM_TEST_UTIL_DIR)

from setup_utils import SetupUtils
from replication_utils import ReplicationUtils
import system_test_utils
from testcase_env import TestcaseEnv

# product specific: Kafka
import kafka_system_test_utils
import metrics


class MirrorMakerTest(ReplicationUtils, SetupUtils):
    """System test driver that runs every mirror-maker testcase directory
    found under this suite: brings up zookeepers, brokers and mirror makers
    on remote hosts, optionally bounces the mirror maker, then validates
    that data replicated from source to target cluster."""

    # Absolute paths of this module and its containing testsuite directory,
    # resolved once at class definition time.
    testModuleAbsPathName = os.path.realpath(__file__)
    testSuiteAbsPathName = os.path.abspath(os.path.dirname(testModuleAbsPathName))

    def __init__(self, systemTestEnv):

        # SystemTestEnv - provides cluster level environment settings
        #     such as entity_id, hostname, kafka_home, java_home which
        #     are available in a list of dictionary named
        #     "clusterEntityConfigDictList"
        self.systemTestEnv = systemTestEnv

        super(MirrorMakerTest, self).__init__(self)

        # dict to pass user-defined attributes to logger argument: "extra"
        # NOTE(review): `d` is a local and appears unused here, while later
        # code reads `self.d` — presumably a base class sets `self.d`;
        # verify against ReplicationUtils/SetupUtils.
        d = {'name_of_class': self.__class__.__name__}

    def signal_handler(self, signal, frame):
        # SIGINT handler installed in runTest(): best-effort remote cleanup
        # before exiting with a non-zero status.
        self.log_message("Interrupt detected - User pressed Ctrl+c")

        # perform the necessary cleanup here when user presses Ctrl+c and it may be product specific
        self.log_message("stopping all entities - please wait ...")
        kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv)
        sys.exit(1)

    def runTest(self):
        """Discover and execute every testcase directory in this suite,
        one at a time; failures in one testcase do not stop the others."""

        # ======================================================================
        # get all testcase directories under this testsuite
        # ======================================================================
        testCasePathNameList = system_test_utils.get_dir_paths_with_prefix(
            self.testSuiteAbsPathName, SystemTestEnv.SYSTEM_TEST_CASE_PREFIX)
        testCasePathNameList.sort()

        replicationUtils = ReplicationUtils(self)

        # =============================================================
        # launch each testcase one by one: testcase_1, testcase_2, ...
        # =============================================================
        for testCasePathName in testCasePathNameList:

            skipThisTestCase = False

            try:
                # ======================================================================
                # A new instance of TestcaseEnv to keep track of this testcase's env vars
                # and initialize some env vars as testCasePathName is available now
                # ======================================================================
                self.testcaseEnv = TestcaseEnv(self.systemTestEnv, self)
                self.testcaseEnv.testSuiteBaseDir = self.testSuiteAbsPathName
                self.testcaseEnv.initWithKnownTestCasePathName(testCasePathName)
                self.testcaseEnv.testcaseArgumentsDict = self.testcaseEnv.testcaseNonEntityDataDict["testcase_args"]

                # ======================================================================
                # SKIP if this case is IN testcase_to_skip.json or NOT IN testcase_to_run.json
                # ======================================================================
                testcaseDirName = self.testcaseEnv.testcaseResultsDict["_test_case_name"]

                if self.systemTestEnv.printTestDescriptionsOnly:
                    self.testcaseEnv.printTestCaseDescription(testcaseDirName)
                    continue
                elif self.systemTestEnv.isTestCaseToSkip(self.__class__.__name__, testcaseDirName):
                    self.log_message("Skipping : " + testcaseDirName)
                    skipThisTestCase = True
                    continue
                else:
                    self.testcaseEnv.printTestCaseDescription(testcaseDirName)
                    system_test_utils.setup_remote_hosts_with_testcase_level_cluster_config(self.systemTestEnv, testCasePathName)

                # ============================================================================== #
                # ============================================================================== #
                #            Product Specific Testing Code Starts Here:                          #
                # ============================================================================== #
                # ============================================================================== #

                # initialize self.testcaseEnv with user-defined environment variables (product specific)
                self.testcaseEnv.userDefinedEnvVarDict["zkConnectStr"] = ""
                # These two flags coordinate with the background producer
                # thread; both are read/written under testcaseEnv.lock below.
                self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] = False
                self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"] = False

                # initialize signal handler
                signal.signal(signal.SIGINT, self.signal_handler)

                # TestcaseEnv.testcaseConfigsList initialized by reading testcase properties file:
                #   system_test/<suite_name>_testsuite/testcase_<n>/testcase_<n>_properties.json
                self.testcaseEnv.testcaseConfigsList = system_test_utils.get_json_list_data(
                    self.testcaseEnv.testcasePropJsonPathName)

                # clean up data directories specified in zookeeper.properties and kafka_server_<n>.properties
                kafka_system_test_utils.cleanup_data_at_remote_hosts(self.systemTestEnv, self.testcaseEnv)

                # create "LOCAL" log directories for metrics, dashboards for each entity under this testcase
                # for collecting logs from remote machines
                kafka_system_test_utils.generate_testcase_log_dirs(self.systemTestEnv, self.testcaseEnv)

                # TestcaseEnv - initialize producer & consumer config / log file pathnames
                kafka_system_test_utils.init_entity_props(self.systemTestEnv, self.testcaseEnv)

                # generate remote hosts log/config dirs if not exist
                kafka_system_test_utils.generate_testcase_log_dirs_in_remote_hosts(self.systemTestEnv, self.testcaseEnv)

                # generate properties files for zookeeper, kafka, producer, consumer and mirror-maker:
                # 1. copy system_test/<suite_name>_testsuite/config/*.properties to
                #    system_test/<suite_name>_testsuite/testcase_<n>/config/
                # 2. update all properties files in system_test/<suite_name>_testsuite/testcase_<n>/config
                #    by overriding the settings specified in:
                #    system_test/<suite_name>_testsuite/testcase_<n>/testcase_<n>_properties.json
                kafka_system_test_utils.generate_overriden_props_files(self.testSuiteAbsPathName,
                    self.testcaseEnv, self.systemTestEnv)

                # =============================================
                # preparing all entities to start the test
                # =============================================
                # NOTE(review): the fixed sleeps below give each remote
                # service time to come up; there is no readiness polling.
                self.log_message("starting zookeepers")
                kafka_system_test_utils.start_zookeepers(self.systemTestEnv, self.testcaseEnv)
                self.anonLogger.info("sleeping for 2s")
                time.sleep(2)

                self.log_message("starting brokers")
                kafka_system_test_utils.start_brokers(self.systemTestEnv, self.testcaseEnv)
                self.anonLogger.info("sleeping for 5s")
                time.sleep(5)

                self.log_message("creating topics")
                kafka_system_test_utils.create_topic_for_producer_performance(self.systemTestEnv, self.testcaseEnv)
                self.anonLogger.info("sleeping for 5s")
                time.sleep(5)

                self.log_message("starting mirror makers")
                kafka_system_test_utils.start_mirror_makers(self.systemTestEnv, self.testcaseEnv)
                self.anonLogger.info("sleeping for 10s")
                time.sleep(10)

                # =============================================
                # starting producer
                # =============================================
                self.log_message("starting producer in the background")
                kafka_system_test_utils.start_producer_performance(self.systemTestEnv, self.testcaseEnv, False)
                msgProducingFreeTimeSec = self.testcaseEnv.testcaseArgumentsDict["message_producing_free_time_sec"]
                self.anonLogger.info("sleeping for " + msgProducingFreeTimeSec + " sec to produce some messages")
                time.sleep(int(msgProducingFreeTimeSec))

                # =============================================
                # A while-loop to bounce mirror maker as specified
                # by "num_iterations" in testcase_n_properties.json
                # =============================================
                i = 1
                numIterations = int(self.testcaseEnv.testcaseArgumentsDict["num_iteration"])
                # Default downtime if "bounced_entity_downtime_sec" is
                # missing or non-numeric in the testcase properties.
                bouncedEntityDownTimeSec = 15
                try:
                    bouncedEntityDownTimeSec = int(self.testcaseEnv.testcaseArgumentsDict["bounced_entity_downtime_sec"])
                except:
                    pass

                while i <= numIterations:

                    self.log_message("Iteration " + str(i) + " of " + str(numIterations))

                    # =============================================
                    # Bounce Mirror Maker if specified in testcase config
                    # =============================================
                    bounceMirrorMaker = self.testcaseEnv.testcaseArgumentsDict["bounce_mirror_maker"]
                    self.log_message("bounce_mirror_maker flag : " + bounceMirrorMaker)

                    if (bounceMirrorMaker.lower() == "true"):
                        # Only the first configured mirror-maker entity is
                        # bounced.
                        clusterConfigList = self.systemTestEnv.clusterEntityConfigDictList
                        mirrorMakerEntityIdList = system_test_utils.get_data_from_list_of_dicts(
                            clusterConfigList, "role", "mirror_maker", "entity_id")
                        stoppedMirrorMakerEntityId = mirrorMakerEntityIdList[0]

                        mirrorMakerPPid = self.testcaseEnv.entityMirrorMakerParentPidDict[stoppedMirrorMakerEntityId]
                        self.log_message("stopping mirror maker : " + mirrorMakerPPid)
                        kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, stoppedMirrorMakerEntityId, mirrorMakerPPid)
                        self.anonLogger.info("sleeping for " + str(bouncedEntityDownTimeSec) + " sec")
                        time.sleep(bouncedEntityDownTimeSec)

                        # starting previously terminated broker
                        self.log_message("starting the previously terminated mirror maker")
                        kafka_system_test_utils.start_mirror_makers(self.systemTestEnv, self.testcaseEnv, stoppedMirrorMakerEntityId)
                        self.anonLogger.info("sleeping for 15s")
                        time.sleep(15)

                    i += 1
                # while loop

                # =============================================
                # tell producer to stop
                # =============================================
                self.testcaseEnv.lock.acquire()
                self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] = True
                time.sleep(1)
                self.testcaseEnv.lock.release()
                time.sleep(1)

                # =============================================
                # wait for producer thread's update of
                # "backgroundProducerStopped" to be "True"
                # =============================================
                while 1:
                    self.testcaseEnv.lock.acquire()
                    self.logger.info("status of backgroundProducerStopped : [" + \
                        str(self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]) + "]", extra=self.d)
                    if self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]:
                        time.sleep(1)
                        self.testcaseEnv.lock.release()
                        self.logger.info("all producer threads completed", extra=self.d)
                        break
                    time.sleep(1)
                    self.testcaseEnv.lock.release()
                    time.sleep(2)

                self.anonLogger.info("sleeping for 15s")
                time.sleep(15)

                # Kill any remaining mirror-maker JVMs by grepping the local
                # process table (SIGTERM via kill -15).
                self.anonLogger.info("terminate Mirror Maker")
                cmdStr = "ps auxw | grep Mirror | grep -v grep | tr -s ' ' | cut -f2 -d ' ' | xargs kill -15"
                subproc = system_test_utils.sys_call_return_subproc(cmdStr)
                for line in subproc.stdout.readlines():
                    line = line.rstrip('\n')
                    self.anonLogger.info("#### ["+line+"]")
                self.anonLogger.info("sleeping for 15s")
                time.sleep(15)

                # =============================================
                # starting consumer
                # =============================================
                self.log_message("starting consumer in the background")
                kafka_system_test_utils.start_console_consumer(self.systemTestEnv, self.testcaseEnv)
                self.anonLogger.info("sleeping for 10s")
                time.sleep(10)

                # =============================================
                # this testcase is completed - stop all entities
                # =============================================
                self.log_message("stopping all entities")
                for entityId, parentPid in self.testcaseEnv.entityBrokerParentPidDict.items():
                    kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid)

                for entityId, parentPid in self.testcaseEnv.entityZkParentPidDict.items():
                    kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid)

                # make sure all entities are stopped
                kafka_system_test_utils.ps_grep_terminate_running_entity(self.systemTestEnv)

                # =============================================
                # collect logs from remote hosts
                # =============================================
                kafka_system_test_utils.collect_logs_from_remote_hosts(self.systemTestEnv, self.testcaseEnv)

                # =============================================
                # validate the data matched and checksum
                # =============================================
                self.log_message("validating data matched")
                kafka_system_test_utils.validate_data_matched(self.systemTestEnv, self.testcaseEnv, replicationUtils)
                kafka_system_test_utils.validate_broker_log_segment_checksum(self.systemTestEnv, self.testcaseEnv, "source")
                kafka_system_test_utils.validate_broker_log_segment_checksum(self.systemTestEnv, self.testcaseEnv, "target")

                # =============================================
                # draw graphs
                # =============================================
                metrics.draw_all_graphs(self.systemTestEnv.METRICS_PATHNAME,
                                        self.testcaseEnv,
                                        self.systemTestEnv.clusterEntityConfigDictList)

                # build dashboard, one for each role
                metrics.build_all_dashboards(self.systemTestEnv.METRICS_PATHNAME,
                                             self.testcaseEnv.testCaseDashboardsDir,
                                             self.systemTestEnv.clusterEntityConfigDictList)

            except Exception as e:
                # Record the failure and keep going with the next testcase.
                self.log_message("Exception while running test {0}".format(e))
                traceback.print_exc()
                self.testcaseEnv.validationStatusDict["Test completed"] = "FAILED"

            finally:
                if not skipThisTestCase and not self.systemTestEnv.printTestDescriptionsOnly:
                    self.log_message("stopping all entities - please wait ...")
                    kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv)
cherbear263/gulp-basic
refs/heads/master
node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/xcode_ninja.py
1789
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Xcode-ninja wrapper project file generator.

This updates the data structures passed to the Xcode gyp generator to build
with ninja instead. The Xcode project itself is transformed into a list of
executable targets, each with a build step to build with ninja, and a target
with every source and resource file. This appears to sidestep some of the
major performance headaches experienced using complex projects and large
number of targets within Xcode.

NOTE(review): this module uses Python 2-only syntax (``except OSError, e``,
``iterkeys``/``iteritems``) and must run under Python 2.
"""

import errno
import gyp.generator.ninja
import os
import re
import xml.sax.saxutils


def _WriteWorkspace(main_gyp, sources_gyp, params):
  """ Create a workspace to wrap main and sources gyp paths.

  Writes <main>.xcworkspace/contents.xcworkspacedata referencing the two
  generated .xcodeproj bundles; the write is skipped when the existing
  file already has identical content (avoids touching mtimes).
  """
  (build_file_root, build_file_ext) = os.path.splitext(main_gyp)
  workspace_path = build_file_root + '.xcworkspace'
  options = params['options']
  if options.generator_output:
    workspace_path = os.path.join(options.generator_output, workspace_path)
  try:
    os.makedirs(workspace_path)
  except OSError, e:
    # Directory already existing is fine; re-raise anything else.
    if e.errno != errno.EEXIST:
      raise
  output_string = '<?xml version="1.0" encoding="UTF-8"?>\n' + \
                  '<Workspace version = "1.0">\n'
  for gyp_name in [main_gyp, sources_gyp]:
    name = os.path.splitext(os.path.basename(gyp_name))[0] + '.xcodeproj'
    # quoteattr also adds the surrounding quotes for the XML attribute.
    name = xml.sax.saxutils.quoteattr("group:" + name)
    output_string += '  <FileRef location = %s></FileRef>\n' % name
  output_string += '</Workspace>\n'

  workspace_file = os.path.join(workspace_path, "contents.xcworkspacedata")

  try:
    with open(workspace_file, 'r') as input_file:
      input_string = input_file.read()
      if input_string == output_string:
        return
  except IOError:
    # Ignore errors if the file doesn't exist.
    pass

  with open(workspace_file, 'w') as output_file:
    output_file.write(output_string)


def _TargetFromSpec(old_spec, params):
  """ Create fake target for xcode-ninja wrapper.

  Copies identity/config keys from |old_spec| into a new target dict whose
  only build step is an action that shells out to ninja. When |params| is
  None (the sources-for-indexing target) no ninja action is attached.
  """
  # Determine ninja top level build dir (e.g. /path/to/out).
  ninja_toplevel = None
  jobs = 0
  if params:
    options = params['options']
    ninja_toplevel = \
      os.path.join(options.toplevel_dir,
                   gyp.generator.ninja.ComputeOutputDir(params))
    jobs = params.get('generator_flags', {}).get('xcode_ninja_jobs', 0)

  target_name = old_spec.get('target_name')
  product_name = old_spec.get('product_name', target_name)
  product_extension = old_spec.get('product_extension')

  ninja_target = {}
  ninja_target['target_name'] = target_name
  ninja_target['product_name'] = product_name
  if product_extension:
    ninja_target['product_extension'] = product_extension
  ninja_target['toolset'] = old_spec.get('toolset')
  ninja_target['default_configuration'] = old_spec.get('default_configuration')
  ninja_target['configurations'] = {}

  # Tell Xcode to look in |ninja_toplevel| for build products.
  new_xcode_settings = {}
  if ninja_toplevel:
    new_xcode_settings['CONFIGURATION_BUILD_DIR'] = \
        "%s/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)" % ninja_toplevel

  if 'configurations' in old_spec:
    for config in old_spec['configurations'].iterkeys():
      old_xcode_settings = \
        old_spec['configurations'][config].get('xcode_settings', {})
      if 'IPHONEOS_DEPLOYMENT_TARGET' in old_xcode_settings:
        # iOS targets: disable code signing for the wrapper and carry the
        # deployment target over so Xcode treats the target as iOS.
        new_xcode_settings['CODE_SIGNING_REQUIRED'] = "NO"
        new_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET'] = \
          old_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET']
      # NOTE(review): every configuration shares the same
      # |new_xcode_settings| dict object (not a copy).
      ninja_target['configurations'][config] = {}
      ninja_target['configurations'][config]['xcode_settings'] = \
        new_xcode_settings

  ninja_target['mac_bundle'] = old_spec.get('mac_bundle', 0)
  ninja_target['ios_app_extension'] = old_spec.get('ios_app_extension', 0)
  ninja_target['ios_watchkit_extension'] = \
    old_spec.get('ios_watchkit_extension', 0)
  ninja_target['ios_watchkit_app'] = old_spec.get('ios_watchkit_app', 0)
  ninja_target['type'] = old_spec['type']

  if ninja_toplevel:
    # The single build step: run ninja in the computed output dir,
    # preserving the caller's PATH so the right ninja binary is found.
    ninja_target['actions'] = [
      {
        'action_name': 'Compile and copy %s via ninja' % target_name,
        'inputs': [],
        'outputs': [],
        'action': [
          'env',
          'PATH=%s' % os.environ['PATH'],
          'ninja',
          '-C',
          new_xcode_settings['CONFIGURATION_BUILD_DIR'],
          target_name,
        ],
        'message': 'Compile and copy %s via ninja' % target_name,
      },
    ]
    if jobs > 0:
      ninja_target['actions'][0]['action'].extend(('-j', jobs))

  return ninja_target


def IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
  """Limit targets for Xcode wrapper.

  Xcode sometimes performs poorly with too many targets, so only include
  proper executable targets, with filters to customize.
  Arguments:
    target_extras: Regular expression to always add, matching any target.
    executable_target_pattern: Regular expression limiting executable targets.
    spec: Specifications for target.
  """
  target_name = spec.get('target_name')
  # Always include targets matching target_extras.
  if target_extras is not None and re.search(target_extras, target_name):
    return True

  # Otherwise just show executable targets.
  if spec.get('type', '') == 'executable' and \
     spec.get('product_extension', '') != 'bundle':

    # If there is a filter and the target does not match, exclude the target.
    if executable_target_pattern is not None:
      if not re.search(executable_target_pattern, target_name):
        return False
    return True
  return False


def CreateWrapper(target_list, target_dicts, data, params):
  """Initialize targets for the ninja wrapper.

  This sets up the necessary variables in the targets to generate Xcode
  projects that use ninja as an external builder.
  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
    data: Dict of flattened build files keyed on gyp path.
    params: Dict of global options for gyp.
  """
  orig_gyp = params['build_files'][0]
  # Pull the source-tree depth recorded for the root build file.
  for gyp_name, gyp_dict in data.iteritems():
    if gyp_name == orig_gyp:
      depth = gyp_dict['_DEPTH']

  # Check for custom main gyp name, otherwise use the default CHROMIUM_GYP_FILE
  # and prepend .ninja before the .gyp extension.
  generator_flags = params.get('generator_flags', {})
  main_gyp = generator_flags.get('xcode_ninja_main_gyp', None)
  if main_gyp is None:
    (build_file_root, build_file_ext) = os.path.splitext(orig_gyp)
    main_gyp = build_file_root + ".ninja" + build_file_ext

  # Create new |target_list|, |target_dicts| and |data| data structures.
  new_target_list = []
  new_target_dicts = {}
  new_data = {}

  # Set base keys needed for |data|.
  new_data[main_gyp] = {}
  new_data[main_gyp]['included_files'] = []
  new_data[main_gyp]['targets'] = []
  new_data[main_gyp]['xcode_settings'] = \
    data[orig_gyp].get('xcode_settings', {})

  # Normally the xcode-ninja generator includes only valid executable targets.
  # If |xcode_ninja_executable_target_pattern| is set, that list is reduced to
  # executable targets that match the pattern. (Default all)
  executable_target_pattern = \
    generator_flags.get('xcode_ninja_executable_target_pattern', None)

  # For including other non-executable targets, add the matching target name
  # to the |xcode_ninja_target_pattern| regular expression. (Default none)
  target_extras = generator_flags.get('xcode_ninja_target_pattern', None)

  for old_qualified_target in target_list:
    spec = target_dicts[old_qualified_target]
    if IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
      # Add to new_target_list.
      target_name = spec.get('target_name')
      new_target_name = '%s:%s#target' % (main_gyp, target_name)
      new_target_list.append(new_target_name)

      # Add to new_target_dicts.
      new_target_dicts[new_target_name] = _TargetFromSpec(spec, params)

      # Add to new_data.
      for old_target in data[old_qualified_target.split(':')[0]]['targets']:
        if old_target['target_name'] == target_name:
          new_data_target = {}
          new_data_target['target_name'] = old_target['target_name']
          new_data_target['toolset'] = old_target['toolset']
          new_data[main_gyp]['targets'].append(new_data_target)

  # Create sources target.
  sources_target_name = 'sources_for_indexing'
  sources_target = _TargetFromSpec(
    { 'target_name' : sources_target_name,
      'toolset': 'target',
      'default_configuration': 'Default',
      'mac_bundle': '0',
      'type': 'executable'
    }, None)

  # Tell Xcode to look everywhere for headers.
  sources_target['configurations'] = {'Default': { 'include_dirs': [ depth ] } }

  # Gather every source / resource / action-input file from all targets so
  # the indexing target covers the whole tree.
  sources = []
  for target, target_dict in target_dicts.iteritems():
    base = os.path.dirname(target)
    files = target_dict.get('sources', []) + \
            target_dict.get('mac_bundle_resources', [])
    for action in target_dict.get('actions', []):
      files.extend(action.get('inputs', []))
    # Remove files starting with $. These are mostly intermediate files for the
    # build system.
    files = [ file for file in files if not file.startswith('$')]

    # Make sources relative to root build file.
    relative_path = os.path.dirname(main_gyp)
    sources += [ os.path.relpath(os.path.join(base, file), relative_path)
                    for file in files ]

  sources_target['sources'] = sorted(set(sources))

  # Put sources_to_index in it's own gyp.
  sources_gyp = \
    os.path.join(os.path.dirname(main_gyp), sources_target_name + ".gyp")
  fully_qualified_target_name = \
    '%s:%s#target' % (sources_gyp, sources_target_name)

  # Add to new_target_list, new_target_dicts and new_data.
  new_target_list.append(fully_qualified_target_name)
  new_target_dicts[fully_qualified_target_name] = sources_target
  new_data_target = {}
  new_data_target['target_name'] = sources_target['target_name']
  new_data_target['_DEPTH'] = depth
  new_data_target['toolset'] = "target"
  new_data[sources_gyp] = {}
  new_data[sources_gyp]['targets'] = []
  new_data[sources_gyp]['included_files'] = []
  new_data[sources_gyp]['xcode_settings'] = \
    data[orig_gyp].get('xcode_settings', {})
  new_data[sources_gyp]['targets'].append(new_data_target)

  # Write workspace to file.
  _WriteWorkspace(main_gyp, sources_gyp, params)
  return (new_target_list, new_target_dicts, new_data)
akbargumbira/inasafe
refs/heads/develop
safe/messaging/item/preformatted_text.py
16
""" InaSAFE Disaster risk assessment tool developed by AusAid - **Preformatted.** Contact : ole.moller.nielsen@gmail.com .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ __author__ = 'marco@opengis.ch' __revision__ = '$Format:%H$' __date__ = '28/05/2013' __copyright__ = ('Copyright 2012, Australia Indonesia Facility for ' 'Disaster Reduction') from text import Text # FIXME (MB) remove when all to_* methods are implemented # pylint: disable=W0223 class PreformattedText(Text): """A representation for a preformatted text item. """ def __init__(self, text, **kwargs): """Constructor. :param text: A string to add to the message. :type text: str We pass the kwargs on to the base class so an exception is raised if invalid keywords were passed. See: http://stackoverflow.com/questions/13124961/ how-to-pass-arguments-efficiently-kwargs-in-python """ if 'style_class' in kwargs: my_class = '%s prettyprint' % kwargs['style_class'] kwargs['style_class'] = my_class super(PreformattedText, self).__init__(**kwargs) self.text = text def to_html(self): """Render as html <pre> element. :returns: The html representation. :rtype: str """ mytext = '<pre%s>\n%s</pre>' % (self.html_attributes(), self.text) return mytext def to_text(self): """Render as plain text. :param text: A string to add to the message. :type text: str """ return '%s' % self.text