| repo_name | ref | path | copies | content |
|---|---|---|---|---|
waxkinetic/fabcloudkit
|
refs/heads/master
|
fabcloudkit/tool/__init__.py
|
1
|
"""
fabcloudkit
:copyright: (c) 2013 by Rick Bohrer.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from .git import *
from .gunicorn import *
from .keys import *
from .nginx import *
from .pip import *
from .pip_command import *
from .python import *
from .redis import *
from .supervisord import *
from .virtualenv import *
|
wastevensv/CSH_PMS
|
refs/heads/master
|
CSH_PMS/settings.py
|
1
|
"""
Django settings for CSH_PMS project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
try:
with open(os.path.join(BASE_DIR,'secret_key.txt')) as f:
SECRET_KEY = f.read().strip()
except FileNotFoundError:
SECRET_KEY = 'thisisnotasecuresecret'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
]
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pms',
)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.RemoteUserBackend',
'django.contrib.auth.backends.ModelBackend',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'custom.CustomHeaderMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'CSH_PMS.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'CSH_PMS.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'US/Eastern'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR,'static/')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media/')
MEDIA_URL = '/media/'
MAX_IMAGE_XSIZE = 300.0
MAX_IMAGE_YSIZE = 200.0
|
vstoykov/django-cms
|
refs/heads/develop
|
cms/migrations/0067_auto__add_field_aliaspluginmodel_alias_placeholder__chg_field_aliasplu.py
|
59
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'AliasPluginModel.alias_placeholder'
db.add_column(u'cms_aliaspluginmodel', 'alias_placeholder',
self.gf('django.db.models.fields.related.ForeignKey')(related_name='alias_placeholder', null=True, to=orm['cms.Placeholder']),
keep_default=False)
# Changing field 'AliasPluginModel.plugin'
db.alter_column(u'cms_aliaspluginmodel', 'plugin_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['cms.CMSPlugin']))
def backwards(self, orm):
# Deleting field 'AliasPluginModel.alias_placeholder'
db.delete_column(u'cms_aliaspluginmodel', 'alias_placeholder_id')
# Changing field 'AliasPluginModel.plugin'
db.alter_column(u'cms_aliaspluginmodel', 'plugin_id', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['cms.CMSPlugin']))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.aliaspluginmodel': {
'Meta': {'object_name': 'AliasPluginModel', '_ormbases': ['cms.CMSPlugin']},
'alias_placeholder': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alias_placeholder'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'plugin': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alias_reference'", 'null': 'True', 'to': "orm['cms.CMSPlugin']"})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'), ('reverse_id', 'site', 'publisher_is_draft'))", 'object_name': 'Page'},
'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'xframe_options': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [u'auth.User']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': u"orm['auth.User']"}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': u"orm['auth.User']"}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cms.placeholderreference': {
'Meta': {'object_name': 'PlaceholderReference', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'placeholder_ref': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'})
},
'cms.staticplaceholder': {
'Meta': {'unique_together': "(('code', 'site'),)", 'object_name': 'StaticPlaceholder'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'creation_method': ('django.db.models.fields.CharField', [], {'default': "'code'", 'max_length': '20', 'blank': 'True'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'draft': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_draft'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'public': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_public'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '155', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Title']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.usersettings': {
'Meta': {'object_name': 'UserSettings'},
'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_usersettings'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
|
ganeshnalawade/ansible
|
refs/heads/devel
|
lib/ansible/modules/ping.py
|
25
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ping
version_added: historical
short_description: Try to connect to host, verify a usable python and return C(pong) on success
description:
- A trivial test module, this module always returns C(pong) on successful
contact. It does not make sense in playbooks, but it is useful from
C(/usr/bin/ansible) to verify the ability to login and that a usable Python is configured.
- This is NOT ICMP ping, this is just a trivial test module that requires Python on the remote-node.
- For Windows targets, use the M(ansible.windows.win_ping) module instead.
- For Network targets, use the M(ansible.netcommon.net_ping) module instead.
options:
data:
description:
- Data to return for the C(ping) return value.
- If this parameter is set to C(crash), the module will cause an exception.
type: str
default: pong
seealso:
- module: ansible.netcommon.net_ping
- module: ansible.windows.win_ping
author:
- Ansible Core Team
- Michael DeHaan
notes:
- Supports C(check_mode).
'''
EXAMPLES = '''
# Test we can logon to 'webservers' and execute python with json lib.
# ansible webservers -m ping
- name: Example from an Ansible Playbook
ansible.builtin.ping:
- name: Induce an exception to see what happens
ansible.builtin.ping:
data: crash
'''
RETURN = '''
ping:
description: Value provided with the data parameter.
returned: success
type: str
sample: pong
'''
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
data=dict(type='str', default='pong'),
),
supports_check_mode=True
)
if module.params['data'] == 'crash':
raise Exception("boom")
result = dict(
ping=module.params['data'],
)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
citassa1985/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/buzzfeed.py
|
133
|
# coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
class BuzzFeedIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?buzzfeed\.com/[^?#]*?/(?P<id>[^?#]+)'
_TESTS = [{
'url': 'http://www.buzzfeed.com/abagg/this-angry-ram-destroys-a-punching-bag-like-a-boss?utm_term=4ldqpia',
'info_dict': {
'id': 'this-angry-ram-destroys-a-punching-bag-like-a-boss',
'title': 'This Angry Ram Destroys A Punching Bag Like A Boss',
'description': 'Rambro!',
},
'playlist': [{
'info_dict': {
'id': 'aVCR29aE_OQ',
'ext': 'mp4',
'upload_date': '20141024',
'uploader_id': 'Buddhanz1',
'description': 'He likes to stay in shape with his heavy bag, he wont stop until its on the ground\n\nFollow Angry Ram on Facebook for regular updates -\nhttps://www.facebook.com/pages/Angry-Ram/1436897249899558?ref=hl',
'uploader': 'Buddhanz',
'title': 'Angry Ram destroys a punching bag',
}
}]
}, {
'url': 'http://www.buzzfeed.com/sheridanwatson/look-at-this-cute-dog-omg?utm_term=4ldqpia',
'params': {
'skip_download': True, # Got enough YouTube download tests
},
'info_dict': {
'id': 'look-at-this-cute-dog-omg',
'description': 're:Munchkin the Teddy Bear is back ?!',
'title': 'You Need To Stop What You\'re Doing And Watching This Dog Walk On A Treadmill',
},
'playlist': [{
'info_dict': {
'id': 'mVmBL8B-In0',
'ext': 'mp4',
'upload_date': '20141124',
'uploader_id': 'CindysMunchkin',
'description': 're:© 2014 Munchkin the',
'uploader': 're:^Munchkin the',
'title': 're:Munchkin the Teddy Bear gets her exercise',
},
}]
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
all_buckets = re.findall(
r'(?s)<div class="video-embed[^"]*"..*?rel:bf_bucket_data=\'([^\']+)\'',
webpage)
entries = []
for bd_json in all_buckets:
bd = json.loads(bd_json)
video = bd.get('video') or bd.get('progload_video')
if not video:
continue
entries.append(self.url_result(video['url']))
return {
'_type': 'playlist',
'id': playlist_id,
'title': self._og_search_title(webpage),
'description': self._og_search_description(webpage),
'entries': entries,
}
|
kencochrane/django-intercom
|
refs/heads/master
|
runtests.py
|
29
|
#!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
if __name__ == "__main__":
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.test_settings'
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(["tests"])
sys.exit(bool(failures))
|
CloudBoltSoftware/cloudbolt-forge
|
refs/heads/master
|
blueprints/aws_aurora_db_clusters/management/create_snapshot.py
|
1
|
"""
Take snapshot action for AWS RDS DB Cluster.
"""
from resourcehandlers.aws.models import AWSHandler
from common.methods import set_progress
from infrastructure.models import CustomField, Environment
import boto3
import time
from django.db import IntegrityError
def run(job, resource, **kwargs):
region = resource.attributes.get(field__name='aws_region').value
rh_id = resource.attributes.get(field__name='aws_rh_id').value
db_cluster_identifier = resource.attributes.get(field__name='db_cluster_identifier').value
handler = AWSHandler.objects.get(id=rh_id)
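# '{{ db_cluster_snapshot_identifier }}' below is a CloudBolt action-input
# placeholder, filled in with the user-supplied value when the action runs.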
db_cluster_snapshot_identifier = '{{ db_cluster_snapshot_identifier }}'
set_progress('Connecting to Amazon RDS')
rds = boto3.client('rds',
region_name=region,
aws_access_key_id=handler.serviceaccount,
aws_secret_access_key=handler.servicepasswd
)
set_progress('Creating a snapshot for "{}"'.format(db_cluster_identifier))
rds.create_db_cluster_snapshot(
DBClusterSnapshotIdentifier=db_cluster_snapshot_identifier,
DBClusterIdentifier=db_cluster_identifier,
)
return "SUCCESS", "Cluster has succesfully been created", ""
|
proxysh/Safejumper-for-Desktop
|
refs/heads/master
|
buildmac/Resources/env/lib/python2.7/site-packages/twisted/python/test/deprecatedattributes.py
|
13
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A module that is deprecated, used by L{twisted.python.test.test_deprecate} for
testing purposes.
"""
from __future__ import division, absolute_import
from incremental import Version
from twisted.python.deprecate import deprecatedModuleAttribute
# Known module-level attributes.
DEPRECATED_ATTRIBUTE = 42
ANOTHER_ATTRIBUTE = 'hello'
version = Version('Twisted', 8, 0, 0)
message = 'Oh noes!'
deprecatedModuleAttribute(
version,
message,
__name__,
'DEPRECATED_ATTRIBUTE')
|
ChinaMassClouds/copenstack-server
|
refs/heads/master
|
openstack/src/horizon-2014.2/horizon/test/test_dashboards/cats/kittens/urls.py
|
6
|
from django.conf.urls import patterns
from django.conf.urls import url
from horizon.test.test_dashboards.cats.kittens.views import IndexView # noqa
urlpatterns = patterns('',
url(r'^$', IndexView.as_view(), name='index'),
)
|
ericzolf/ansible
|
refs/heads/devel
|
test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/testmodule_bad_docfrags.py
|
66
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
DOCUMENTATION = r'''
module: testmodule
description: for testing
extends_documentation_fragment:
- noncollbogusfrag
- noncollbogusfrag.bogusvar
- bogusns.testcoll.frag
- testns.boguscoll.frag
- testns.testcoll.bogusfrag
- testns.testcoll.frag.bogusvar
'''
def main():
print(json.dumps(dict(changed=False, source='user')))
if __name__ == '__main__':
main()
|
lightbulb-framework/lightbulb-framework
|
refs/heads/master
|
lightbulb/data/regex/MODSECCURRENT/973309.py
|
667
|
META = {
'author': 'George Argyros, Ioannis Stais',
'description': 'Automatic transformed ruleset',
'type':'Regex',
'comments': []
}
|
cloudera/Impala
|
refs/heads/cdh6.3.0
|
tests/common/test_vector.py
|
2
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# An ImpalaTestMatrix is used to generate a set of ImpalaTestVectors. The vectors that
# are generated are based on one or more ImpalaTestDimension inputs. These lists define
# the set of values that are interesting to a test. For example, for file_format
# these might be 'seq', 'text', etc.
#
# The ImpalaTestMatrix is then used to generate a set of ImpalaTestVectors. Each
# ImpalaTestVector contains a single value from each of the input ImpalaTestDimensions.
# An example:
#
# ImpalaTestMatrix.add_dimension('file_format', 'seq', 'text')
# ImpalaTestMatrix.add_dimension('agg_func', 'min', 'max', 'sum')
# ImpalaTestMatrix.add_dimension('col_type', 'int', 'bool')
# test_vectors = ImpalaTestMatrix.generate_test_vectors(...)
#
# Would return a collection of ImpalaTestVectors, with each one containing a
# combination of file_format, agg_func, and col_type:
# seq, min, int
# text, max, bool
# ...
#
# An ImpalaTestVector is an object itself, and the 'get_value' function is used to
# extract the actual value from the ImpalaTestVector for this particular combination:
# test_vector = test_vectors[0]
# print test_vector.get_value('file_format')
#
# The combinations of ImpalaTestVectors generated can be done in two ways: pairwise
# and exhaustive. Pairwise provides a way to get good coverage and reduce the total
# number of combinations generated where exhaustive will generate all valid
# combinations.
#
# Finally, the ImpalaTestMatrix also provides a way to add constraints to the vectors
# that are generated. This is useful to filter out invalid combinations. These can
# be added before calling 'generate_test_vectors'. The constraint is a function that
# accepts an ImpalaTestVector object and returns True if the vector is valid, False
# otherwise. For example, if we want to make sure 'bool' columns are not used with 'sum':
#
# ImpalaTestMatrix.add_constraint(lambda v:\
# not (v.get_value('col_type') == 'bool' and v.get_value('agg_func') == 'sum'))
#
# Additional examples of usage can be found within the test suites.
from itertools import product
# A list of test dimension values.
class ImpalaTestDimension(list):
def __init__(self, name, *args):
self.name = name
self.extend([ImpalaTestVector.Value(name, arg) for arg in args])
# A test vector that is passed to a test method. The ImpalaTestVector can be used to
# extract values for the specified dimension(s).
class ImpalaTestVector(object):
def __init__(self, vector_values):
self.vector_values = vector_values
def get_value(self, name):
for vector_value in self.vector_values:
if vector_value.name == name:
return vector_value.value
raise ValueError("Test vector does not contain value '%s'" % name)
def __str__(self):
return ' | '.join(['%s' % vector_value for vector_value in self.vector_values])
# Each value in a test vector is wrapped in the Value object. This wrapping is
# done internally so this object should never need to be created by the user.
class Value(object):
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
return '%s: %s' % (self.name, self.value)
# Matrix -> Collection of vectors
# Vector -> Call to get specific values
class ImpalaTestMatrix(object):
def __init__(self, *args):
self.dimensions = dict((arg.name, arg) for arg in args)
self.constraint_list = list()
def add_dimension(self, dimension):
self.dimensions[dimension.name] = dimension
def add_mandatory_exec_option(self, exec_option_key, exec_option_value):
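# Force the given exec option key/value into every value of the 'exec_option' dimension.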
for vector in self.dimensions['exec_option']:
vector.value[exec_option_key] = exec_option_value
def clear(self):
self.dimensions.clear()
def clear_dimension(self, dimension_name):
del self.dimensions[dimension_name]
def has_dimension(self, dimension_name):
return self.dimensions.has_key(dimension_name)
def generate_test_vectors(self, exploration_strategy):
if not self.dimensions:
return list()
# TODO: Check valid exploration strategies, provide more options for exploration
if exploration_strategy == 'exhaustive':
return self.__generate_exhaustive_combinations()
elif exploration_strategy in ['core', 'pairwise']:
return self.__generate_pairwise_combinations()
else:
raise ValueError('Unknown exploration strategy: %s' % exploration_strategy)
def __generate_exhaustive_combinations(self):
return [ImpalaTestVector(vec) for vec in product(*self.__extract_vector_values())
if self.is_valid(vec)]
def __generate_pairwise_combinations(self):
import metacomm.combinatorics.all_pairs2
all_pairs = metacomm.combinatorics.all_pairs2.all_pairs2
# Pairwise fails if the number of inputs == 1. Use exhaustive in this case;
# the results will be the same.
if len(self.dimensions) == 1:
return self.__generate_exhaustive_combinations()
return [ImpalaTestVector(vec) for vec in all_pairs(self.__extract_vector_values(),
filter_func = self.is_valid)]
def add_constraint(self, constraint_func):
self.constraint_list.append(constraint_func)
def clear_constraints(self):
self.constraint_list = list()
def __extract_vector_values(self):
# The data is stored as a tuple of (name, [val1, val2, val3]). So extract the
# actual values from this.
return [v[1] for v in self.dimensions.items()]
def is_valid(self, vector):
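# A raw value list/tuple is wrapped in an ImpalaTestVector before each registered
# constraint is applied; the first constraint that rejects it marks the vector invalid.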
for constraint in self.constraint_list:
if (isinstance(vector, list) or isinstance(vector, tuple)) and\
len(vector) == len(self.dimensions):
valid = constraint(ImpalaTestVector(vector))
if valid:
continue
return False
return True
|
JohnStarich/github-code-recommendations
|
refs/heads/master
|
data-scripts/mongo-client.py
|
1
|
#!/usr/bin/env python3
import pandas as pd
from pymongo import MongoClient
def _connect_mongo(host, port, username, password, db):
""" A util for making a connection to mongo """
if username and password:
mongo_uri = 'mongodb://%s:%s@%s:%s/%s' % (username, password, host, port, db)
conn = MongoClient(mongo_uri)
else:
conn = MongoClient(host, port)
return conn[db]
def read_mongo(db, collection, query={}, host='localhost', port=27017, username=None, password=None, no_id=True):
""" Read from Mongo and Store into DataFrame """
# Connect to MongoDB
db = _connect_mongo(host=host, port=port, username=username, password=password, db=db)
# Make a query to the specific DB and Collection
cursor = db[collection].find(query)
# Expand the cursor and construct the DataFrame
df = pd.DataFrame(list(cursor))
# Delete the _id
if no_id:
del df['_id']
return df
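# Example usage (hypothetical database and collection names):
#   df = read_mongo('tweets_db', 'tweets', query={'lang': 'en'})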
|
bhargavz/py-twitter-sentiment-analysis
|
refs/heads/master
|
data/db/base/baseDB.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# FILE: BaseDB.py
# DATE: April, 2012
# The base level DB class for tweet collection and analysis
#
# Copyright by Author. All rights reserved. Not for reuse without
# express permissions.
#
#
import os
import sys
from sqlalchemy.orm import clear_mappers
class BaseDB(object):
def __init__(self, config = None):
self.config = config
self.db = None
self.session = None
self.limit_size = 5000
self.offset = 0
self.paged_qid = 1000
self.paged_queries = { }
def close(self):
if( self.session ):
self.session.flush()
if( self.db ):
self.db.close()
def insert_in_table(self, table, set):
try:
table.insert().execute(set)
except Exception, e:
print "FATAL ERROR while inserting recs into \"%s\" Table"(table.name)
sys.exit(0)
return True
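# Paged queries: each query is registered under a numeric id together with its
# page size and current offset; paged_query_next_page returns the next page of
# results and advances the offset.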
def new_paged_query(self, query):
qid = self.paged_qid
self.paged_qid += 1
q_rec = {'active_query':query,'limit_size':self.limit_size,'offset':self.offset}
self.paged_queries[qid] = q_rec
return qid
def get_paged_query(self, qid):
if( self.paged_queries.has_key(qid) ):
q_rec = self.paged_queries[qid]
return q_rec['active_query']
return None
def update_paged_query(self, qid, q):
if( self.paged_queries.has_key(qid) ):
q_rec = self.paged_queries[qid]
q_rec['active_query'] = q
return True
return False
def delete_paged_query(self, qid):
if( self.paged_queries.has_key(qid) ):
del self.paged_queries[qid]
return True
return False
def paged_query_page_size(self, qid, page_size):
if( self.paged_queries.has_key(qid) ):
q_rec = self.paged_queries[qid]
q_rec['limit_size'] = page_size
return True
return False
def paged_query_starting_offset(self, qid, s_offset):
if( self.paged_queries.has_key(qid) ):
q_rec = self.paged_queries[qid]
q_rec['offset'] = s_offset
return True
return False
def paged_query_next_page(self, qid):
if( self.paged_queries.has_key(qid) ):
q_rec = self.paged_queries[qid]
query = q_rec['active_query']
l = q_rec['limit_size']
o = q_rec['offset']
q_rec['offset'] = o + l
return query.limit(l).offset(o)
return None
def delete_item(self, item):
if( self.session ):
self.session.delete(item)
return True
else:
return False
def update_item(self, item):
if( self.session ):
self.session.flush()
return True
else:
return False
def insert_item(self, item):
if( self.session ):
self.session.add(item)
return True
else:
return False
def commit_changes(self):
if( self.session ):
self.session.flush()
return True
else:
return False
|
TitoAlehandro/calc
|
refs/heads/master
|
djangobb_forum/tests/__init__.py
|
4
|
from test_forum import *
from test_reputation import *
from test_profile import *
from test_utils import *
from test_templatetags import *
|
ian-r-rose/mcplates
|
refs/heads/master
|
mcplates/data/continents/mkfrp.py
|
1
|
#!/usr/bin/env python
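# Generates ../frp.py, which defines get_pole(continent, age): a lookup built
# from the per-continent .frp data files listed below. Each data row starts
# with an age; the remaining three columns are returned as the pole.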
files=['aus','eur','mad','nwaf','col','grn','nam','par','eant','ind','neaf','sac','ib']
out=open('../frp.py','w')
out.write("def get_pole(continent,age):\n")
for file in files:
outstring=" if continent=="+repr(file)+":\n"
out.write(outstring)
if file!='ib':
f=open(file+'_saf.frp','rU')
else:
f=open('ib_eur.frp','rU')
frp='['
for line in f.readlines():
rec=line.split()
frp=frp+"["+rec[0]+','+rec[1]+','+rec[2]+','+rec[3]+'],'
outstring=" cont= "+frp[:-1]+']\n'
out.write(outstring)
outstring=" for rec in cont:\n "
out.write(outstring)
outstring=" if age==int(rec[0]): return [rec[1],rec[2],rec[3]] \n"
out.write(outstring)
outstring=" if continent=='saf':\n"
out.write(outstring)
f=open('saf.frp','rU')
frp='['
for line in f.readlines():
rec=line.split()
frp=frp+"["+rec[0]+','+rec[1]+','+rec[2]+','+rec[3]+'],'
outstring=" cont= "+frp[:-1]+']\n'
out.write(outstring)
outstring=" for rec in cont:\n "
out.write(outstring)
outstring=" if age==int(rec[0]): return [rec[1],rec[2],rec[3]] \n"
out.write(outstring)
outstring=" return 'NONE'\n"
out.write(outstring)
|
swegener/libsigrokdecode
|
refs/heads/master
|
decoders/ltc242x/pd.py
|
4
|
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2020 Analog Devices Inc.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
input_voltage_format = ['%.6fV', '%.2fV']
class Decoder(srd.Decoder):
api_version = 3
id = 'ltc242x'
name = 'LTC242x'
longname = 'Linear Technology LTC242x'
desc = 'Linear Technology LTC2421/LTC2422 1-/2-channel 20-bit ADC.'
license = 'gplv2+'
inputs = ['spi']
outputs = []
tags = ['IC', 'Analog/digital']
annotations = (
('ch0_voltage', 'CH0 voltage'),
('ch1_voltage', 'CH1 voltage'),
)
annotation_rows = (
('ch0_voltages', 'CH0 voltages', (0,)),
('ch1_voltages', 'CH1 voltages', (1,)),
)
options = (
{'id': 'vref', 'desc': 'Reference voltage (V)', 'default': 1.5},
)
def __init__(self):
self.reset()
def reset(self):
self.data = 0
self.ss, self.es = 0, 0
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
def handle_input_voltage(self, data):
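# Bit 22 of the result word selects the channel; the lower 22 bits hold the
# conversion result in offset-binary form, so subtracting 2**21 recenters it
# around zero before scaling by the configured reference voltage.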
input_voltage = data & 0x3FFFFF
input_voltage = -(2**21 - input_voltage)
input_voltage = (input_voltage / 0xfffff) * self.options['vref']
ann = []
for format in input_voltage_format:
ann.append(format % input_voltage)
channel = (data & (1 << 22)) >> 22
self.put(self.ss, self.es, self.out_ann, [channel, ann])
def decode(self, ss, es, data):
ptype = data[0]
if ptype == 'CS-CHANGE':
cs_old, cs_new = data[1:]
if cs_old is not None and cs_old == 0 and cs_new == 1:
self.es = es
self.data >>= 1
self.handle_input_voltage(self.data)
self.data = 0
elif cs_old is not None and cs_old == 1 and cs_new == 0:
self.ss = ss
elif ptype == 'BITS':
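# Each incoming bit is OR'd into the word and then the word is shifted left,
# leaving one surplus shift that the CS-CHANGE handler undoes with '>>= 1'.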
miso = data[2]
for bit in reversed(miso):
self.data = self.data | bit[0]
self.data <<= 1
|
vanatteveldt/xtas
|
refs/heads/master
|
xtas/tasks/es.py
|
1
|
"""Elasticsearch stuff."""
from __future__ import absolute_import
from datetime import datetime
from six import iteritems
from chardet import detect as chardetect
from elasticsearch import Elasticsearch
from ..core import app, _config
def _es():
return Elasticsearch(hosts=_config['ELASTICSEARCH'])
_ES_DOC_FIELDS = ('index', 'type', 'id', 'field')
def es_document(idx, typ, id, field):
"""Returns a handle on a field in a document living in the ES store.
This does not fetch the document, or even check that it exists.
"""
# Returns a dict instead of a custom object to ensure JSON serialization
# works.
return {'index': idx, 'type': typ, 'id': id, 'field': field}
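# Example handle (hypothetical index/type/id values):
#   doc = es_document('blog', 'post', 1, 'body')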
def fetch(doc):
"""Fetch document (if necessary).
Parameters
----------
doc : {dict, string}
A dictionary representing a handle returned by es_document, or a plain
string. A Unicode string (Python 2 unicode, Python 3 str) will be
returned as-is. A byte string (Python 2 str, Python 3 bytes) will be
run through chardet to guess the encoding, then decoded with
errors="replace".
Returns
-------
content : string
Document contents.
"""
if isinstance(doc, dict) and set(doc.keys()) == set(_ES_DOC_FIELDS):
idx, typ, id, field = [doc[k] for k in _ES_DOC_FIELDS]
return _es().get_source(index=idx, doc_type=typ, id=id)[field]
elif isinstance(doc, unicode):
return doc
elif isinstance(doc, str):
enc = chardetect(doc)['encoding']
return doc.decode(enc, errors="replace")
else:
raise TypeError("fetch expected es_document or string, got %s"
% type(doc))
@app.task
def fetch_query_batch(idx, typ, query, field='body'):
"""Fetch all documents matching query and return them as a list.
Returns a list of field contents, with documents that don't have the
required field silently filtered out.
"""
r = _es().search(index=idx, doc_type=typ, body={'query': query},
_source=[field])
r = (hit['_source'].get(field, None) for hit in r['hits']['hits'])
return [hit for hit in r if hit is not None]
@app.task
def store_single(data, taskname, idx, typ, id, return_data=True):
"""Store the data in the xtas_results.taskname property of the document.
If return_data is true, also returns data (useful for debugging). Set this
to false to save bandwidth.
"""
now = datetime.now().isoformat()
doc = {"xtas_results": {taskname: {'data': data, 'timestamp': now}}}
_es().update(index=idx, doc_type=typ, id=id, body={"doc": doc})
return data if return_data else None
def get_all_results(idx, typ, id):
"""
Get all xtas results for the document
Returns a (possibly empty) {taskname : data} dict
"""
r = _es().get(index=idx, doc_type=typ, id=id, _source=['xtas_results'])
if 'xtas_results' in r['_source']:
return {k: v['data']
for k, v in iteritems(r['_source']['xtas_results'])}
else:
return {}
def get_single_result(taskname, idx, typ, id):
"""Get a single xtas result"""
r = get_all_results(idx, typ, id)
return r.get(taskname)
|
garoa/pingo
|
refs/heads/master
|
pingo/__init__.py
|
2
|
# api
from board import ANALOG # noqa
from board import IN # noqa
from board import OUT # noqa
from board import PWM # noqa
from board import HIGH # noqa
from board import LOW # noqa
from board import ModeNotSuported # noqa
from board import WrongPinMode # noqa
from board import PwmOutputCapable # noqa
from board import AnalogInputCapable # noqa
from board import Board # noqa
from board import PwmPin # noqa
from board import AnalogPin # noqa
from board import DigitalPin # noqa
from board import GroundPin # noqa
from board import Pin # noqa
from board import VccPin # noqa
import parts # noqa
# boards
import rpi # noqa
import ghost # noqa
import galileo # noqa
import udoo # noqa
import pcduino # noqa
import arduino # noqa
import bbb # noqa
# resources
import detect # noqa
import test # noqa
|
ltilve/ChromiumGStreamerBackend
|
refs/heads/master
|
tools/telemetry/telemetry/testing/__init__.py
|
1201
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
|
MSOpenTech/edx-platform
|
refs/heads/master
|
lms/djangoapps/branding/views.py
|
29
|
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import Http404
from django.shortcuts import redirect
from django_future.csrf import ensure_csrf_cookie
import student.views
from student.models import CourseEnrollment
import courseware.views
from microsite_configuration import microsite
from edxmako.shortcuts import marketing_link
from util.cache import cache_if_anonymous
def get_course_enrollments(user):
"""
Returns the course enrollments for the passed-in user within the context of a
microsite, filtered by course_org_filter.
"""
enrollments = CourseEnrollment.enrollments_for_user(user)
microsite_org = microsite.get_value('course_org_filter')
if microsite_org:
site_enrollments = [
enrollment for enrollment in enrollments if enrollment.course_id.org == microsite_org
]
else:
site_enrollments = [
enrollment for enrollment in enrollments
]
return site_enrollments
@ensure_csrf_cookie
@cache_if_anonymous()
def index(request):
'''
Redirects to main page -- info page if user authenticated, or marketing if not
'''
if settings.COURSEWARE_ENABLED and request.user.is_authenticated():
# For microsites, only redirect to dashboard if user has
# courses in his/her dashboard. Otherwise UX is a bit cryptic.
# In this case, we want to have the user stay on a course catalog
# page to make it easier to browse for courses (and register)
if microsite.get_value(
'ALWAYS_REDIRECT_HOMEPAGE_TO_DASHBOARD_FOR_AUTHENTICATED_USER',
settings.FEATURES.get('ALWAYS_REDIRECT_HOMEPAGE_TO_DASHBOARD_FOR_AUTHENTICATED_USER', True)
):
return redirect(reverse('dashboard'))
if settings.FEATURES.get('AUTH_USE_CERTIFICATES'):
from external_auth.views import ssl_login
# Set next URL to dashboard if it isn't set to avoid
# caching a redirect to / that causes a redirect loop on logout
if not request.GET.get('next'):
req_new = request.GET.copy()
req_new['next'] = reverse('dashboard')
request.GET = req_new
return ssl_login(request)
enable_mktg_site = microsite.get_value(
'ENABLE_MKTG_SITE',
settings.FEATURES.get('ENABLE_MKTG_SITE', False)
)
if enable_mktg_site:
return redirect(settings.MKTG_URLS.get('ROOT'))
domain = request.META.get('HTTP_HOST')
# keep specialized logic for Edge until we can migrate over Edge to fully use
# microsite definitions
if domain and 'edge.edx.org' in domain:
return redirect(reverse("signin_user"))
# we do not expect this case to be reached in cases where
# marketing and edge are enabled
return student.views.index(request, user=request.user)
@ensure_csrf_cookie
@cache_if_anonymous()
def courses(request):
"""
Render the "find courses" page. If the marketing site is enabled, redirect
to that. Otherwise, if subdomain branding is on, this is the university
profile page. Otherwise, it's the edX courseware.views.courses page
"""
enable_mktg_site = microsite.get_value(
'ENABLE_MKTG_SITE',
settings.FEATURES.get('ENABLE_MKTG_SITE', False)
)
if enable_mktg_site:
return redirect(marketing_link('COURSES'), permanent=True)
if not settings.FEATURES.get('COURSES_ARE_BROWSABLE'):
raise Http404
# we do not expect this case to be reached in cases where
# marketing is enabled or the courses are not browsable
return courseware.views.courses(request)
|
netcriptus/Canivete
|
refs/heads/master
|
functions/pula_um.py
|
1
|
#!/usr/bin/env python
# encoding: utf-8
"""
pula-um.py
Created by Fernando Cezar on 2012-05-20.
Copyright (c) 2012 __MyCompanyName__. All rights reserved.
"""
from lib.IOHandler import *
def pula_um(arq_input, arq_path):
"""
Given a string, reads every other character (one kept, one skipped).
"""
strings = read_input(arq_input, arq_path)
decrypted_string = ""
for string in strings:
decrypted_string += string[::2] + "\n"
return decrypted_string
|
h3biomed/ansible-modules-extras
|
refs/heads/devel
|
cloud/misc/ovirt.py
|
23
|
#!/usr/bin/python
# (c) 2013, Vincent Van der Kussen <vincent at vanderkussen.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ovirt
author: "Vincent Van der Kussen (@vincentvdk)"
short_description: oVirt/RHEV platform management
description:
- allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform
version_added: "1.4"
options:
user:
description:
- the user to authenticate with
default: null
required: true
aliases: []
url:
description:
- the url of the oVirt instance
default: null
required: true
aliases: []
instance_name:
description:
- the name of the instance to use
default: null
required: true
aliases: [ vmname ]
password:
description:
- password of the user to authenticate with
default: null
required: true
aliases: []
image:
description:
- template to use for the instance
default: null
required: false
aliases: []
resource_type:
description:
- whether you want to deploy an image or create an instance from scratch.
default: null
required: false
aliases: []
choices: [ 'new', 'template' ]
zone:
description:
- deploy the image to this oVirt cluster
default: null
required: false
aliases: []
instance_disksize:
description:
- size of the instance's disk in GB
default: null
required: false
aliases: [ vm_disksize]
instance_cpus:
description:
- the instance's number of cpu's
default: 1
required: false
aliases: [ vmcpus ]
instance_nic:
description:
- name of the network interface in oVirt/RHEV
default: null
required: false
aliases: [ vmnic ]
instance_network:
description:
- the logical network the machine should belong to
default: rhevm
required: false
aliases: [ vmnetwork ]
instance_mem:
description:
- the instance's amount of memory in MB
default: null
required: false
aliases: [ vmmem ]
instance_type:
description:
- define if the instance is a server or desktop
default: server
required: false
aliases: [ vmtype ]
choices: [ 'server', 'desktop' ]
disk_alloc:
description:
- define if disk is thin or preallocated
default: thin
required: false
aliases: []
choices: [ 'thin', 'preallocated' ]
disk_int:
description:
- interface type of the disk
default: virtio
required: false
aliases: []
choices: [ 'virtio', 'ide' ]
instance_os:
description:
- type of Operating System
default: null
required: false
aliases: [ vmos ]
instance_cores:
description:
- define the instance's number of cores
default: 1
required: false
aliases: [ vmcores ]
sdomain:
description:
- the Storage Domain where you want to create the instance's disk on.
default: null
required: false
aliases: []
region:
description:
- the oVirt/RHEV datacenter where you want to deploy to
default: null
required: false
aliases: []
instance_dns:
description:
- define the instance's Primary DNS server
required: false
aliases: [ dns ]
version_added: "2.1"
instance_domain:
description:
- define the instance's Domain
required: false
aliases: [ domain ]
version_added: "2.1"
instance_hostname:
description:
- define the instance's Hostname
required: false
aliases: [ hostname ]
version_added: "2.1"
instance_ip:
description:
- define the instance's IP
required: false
aliases: [ ip ]
version_added: "2.1"
instance_netmask:
description:
- define the instance's Netmask
required: false
aliases: [ netmask ]
version_added: "2.1"
instance_rootpw:
description:
- define the instance's Root password
required: false
aliases: [ rootpw ]
version_added: "2.1"
instance_key:
description:
- define the instance's Authorized key
required: false
aliases: [ key ]
version_added: "2.1"
state:
description:
- create, terminate or remove instances
default: 'present'
required: false
aliases: []
choices: ['present', 'absent', 'shutdown', 'started', 'restarted']
requirements:
- "python >= 2.6"
- "ovirt-engine-sdk-python"
'''
EXAMPLES = '''
# Basic example provisioning from image.
ovirt:
user: admin@internal
url: https://ovirt.example.com
instance_name: ansiblevm04
password: secret
image: centos_64
zone: cluster01
resource_type: template
# Full example to create new instance from scratch
ovirt:
instance_name: testansible
resource_type: new
instance_type: server
user: admin@internal
password: secret
url: https://ovirt.example.com
instance_disksize: 10
zone: cluster01
region: datacenter1
instance_cpus: 1
instance_nic: nic1
instance_network: rhevm
instance_mem: 1000
disk_alloc: thin
sdomain: FIBER01
instance_cores: 1
instance_os: rhel_6x64
disk_int: virtio
# stopping an instance
ovirt:
instance_name: testansible
state: shutdown
user: admin@internal
password: secret
url: https://ovirt.example.com
# starting an instance
ovirt:
instance_name: testansible
state: started
user: admin@internal
password: secret
url: https://ovirt.example.com
# starting an instance with cloud init information
ovirt:
instance_name: testansible
state: started
user: admin@internal
password: secret
url: https://ovirt.example.com
hostname: testansible
domain: ansible.local
ip: 192.0.2.100
netmask: 255.255.255.0
gateway: 192.0.2.1
rootpw: bigsecret
'''
import time

try:
from ovirtsdk.api import API
from ovirtsdk.xml import params
HAS_OVIRTSDK = True
except ImportError:
HAS_OVIRTSDK = False
# ------------------------------------------------------------------- #
# create connection with API
#
def conn(url, user, password):
api = API(url=url, username=user, password=password, insecure=True)
try:
value = api.test()
except:
raise Exception("error connecting to the oVirt API")
return api
# ------------------------------------------------------------------- #
# Create VM from scratch
def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int):
if vmdisk_alloc == 'thin':
# define VM params
vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))), type_=vmtype)
# define disk params
vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System", format='cow',
storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
# define network parameters
network_net = params.Network(name=vmnetwork)
nic_net1 = params.NIC(name='nic1', network=network_net, interface='virtio')
elif vmdisk_alloc == 'preallocated':
# define VM params
vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))) ,type_=vmtype)
# define disk params
vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System", format='raw',
storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
# define network parameters
network_net = params.Network(name=vmnetwork)
nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio')
try:
conn.vms.add(vmparams)
    except Exception:
raise Exception("Error creating VM with specified parameters")
vm = conn.vms.get(name=vmname)
try:
vm.disks.add(vmdisk)
    except Exception:
raise Exception("Error attaching disk")
try:
vm.nics.add(nic_net1)
    except Exception:
raise Exception("Error adding nic")
# create an instance from a template
def create_vm_template(conn, vmname, image, zone):
vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), template=conn.templates.get(name=image),disks=params.Disks(clone=True))
try:
conn.vms.add(vmparams)
    except Exception:
raise Exception('error adding template %s' % image)
# start instance
def vm_start(conn, vmname, hostname=None, ip=None, netmask=None, gateway=None,
domain=None, dns=None, rootpw=None, key=None):
vm = conn.vms.get(name=vmname)
use_cloud_init = False
nics = None
nic = None
if hostname or ip or netmask or gateway or domain or dns or rootpw or key:
use_cloud_init = True
if ip and netmask and gateway:
ipinfo = params.IP(address=ip, netmask=netmask, gateway=gateway)
nic = params.GuestNicConfiguration(name='eth0', boot_protocol='STATIC', ip=ipinfo, on_boot=True)
nics = params.GuestNicsConfiguration(nic_configuration=[nic])
initialization=params.Initialization(regenerate_ssh_keys=True, host_name=hostname, domain=domain, user_name='root',
root_password=rootpw, nic_configurations=nics, dns_servers=dns,
authorized_ssh_keys=key)
action = params.Action(use_cloud_init=use_cloud_init, vm=params.VM(initialization=initialization))
vm.start(action=action)
# Stop instance
def vm_stop(conn, vmname):
vm = conn.vms.get(name=vmname)
vm.stop()
# restart instance
def vm_restart(conn, vmname):
    vm = conn.vms.get(name=vmname)
    vm.stop()
    while conn.vms.get(name=vmname).get_status().get_state() != 'down':
        time.sleep(5)
    vm.start()
# remove an instance
def vm_remove(conn, vmname):
vm = conn.vms.get(name=vmname)
vm.delete()
# ------------------------------------------------------------------- #
# VM statuses
#
# Get the VMs status
def vm_status(conn, vmname):
status = conn.vms.get(name=vmname).status.state
return status
# Get VM object and return its name if the object exists
def get_vm(conn, vmname):
vm = conn.vms.get(name=vmname)
    if vm is None:
name = "empty"
else:
name = vm.get_name()
return name
# ------------------------------------------------------------------- #
# Hypervisor operations
#
# not available yet
# ------------------------------------------------------------------- #
# Main
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['present', 'absent', 'shutdown', 'started', 'restart']),
#name = dict(required=True),
user = dict(required=True),
url = dict(required=True),
instance_name = dict(required=True, aliases=['vmname']),
password = dict(required=True, no_log=True),
image = dict(),
resource_type = dict(choices=['new', 'template']),
zone = dict(),
instance_disksize = dict(aliases=['vm_disksize']),
instance_cpus = dict(default=1, aliases=['vmcpus']),
instance_nic = dict(aliases=['vmnic']),
instance_network = dict(default='rhevm', aliases=['vmnetwork']),
instance_mem = dict(aliases=['vmmem']),
instance_type = dict(default='server', aliases=['vmtype'], choices=['server', 'desktop']),
disk_alloc = dict(default='thin', choices=['thin', 'preallocated']),
disk_int = dict(default='virtio', choices=['virtio', 'ide']),
instance_os = dict(aliases=['vmos']),
instance_cores = dict(default=1, aliases=['vmcores']),
instance_hostname = dict(aliases=['hostname']),
instance_ip = dict(aliases=['ip']),
instance_netmask = dict(aliases=['netmask']),
instance_gateway = dict(aliases=['gateway']),
instance_domain = dict(aliases=['domain']),
instance_dns = dict(aliases=['dns']),
instance_rootpw = dict(aliases=['rootpw']),
instance_key = dict(aliases=['key']),
sdomain = dict(),
region = dict(),
)
)
if not HAS_OVIRTSDK:
module.fail_json(msg='ovirtsdk required for this module')
state = module.params['state']
user = module.params['user']
url = module.params['url']
vmname = module.params['instance_name']
password = module.params['password']
image = module.params['image'] # name of the image to deploy
resource_type = module.params['resource_type'] # template or from scratch
zone = module.params['zone'] # oVirt cluster
vmdisk_size = module.params['instance_disksize'] # disksize
vmcpus = module.params['instance_cpus'] # number of cpu
vmnic = module.params['instance_nic'] # network interface
vmnetwork = module.params['instance_network'] # logical network
vmmem = module.params['instance_mem'] # mem size
vmdisk_alloc = module.params['disk_alloc'] # thin, preallocated
vmdisk_int = module.params['disk_int'] # disk interface virtio or ide
vmos = module.params['instance_os'] # Operating System
vmtype = module.params['instance_type'] # server or desktop
vmcores = module.params['instance_cores'] # number of cores
sdomain = module.params['sdomain'] # storage domain to store disk on
region = module.params['region'] # oVirt Datacenter
hostname = module.params['instance_hostname']
ip = module.params['instance_ip']
netmask = module.params['instance_netmask']
gateway = module.params['instance_gateway']
domain = module.params['instance_domain']
dns = module.params['instance_dns']
rootpw = module.params['instance_rootpw']
key = module.params['instance_key']
#initialize connection
try:
c = conn(url+"/api", user, password)
except Exception as e:
module.fail_json(msg='%s' % e)
if state == 'present':
if get_vm(c, vmname) == "empty":
if resource_type == 'template':
try:
create_vm_template(c, vmname, image, zone)
except Exception as e:
module.fail_json(msg='%s' % e)
module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmname,image))
elif resource_type == 'new':
# FIXME: refactor, use keyword args.
try:
create_vm(c, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int)
except Exception as e:
module.fail_json(msg='%s' % e)
module.exit_json(changed=True, msg="deployed VM %s from scratch" % vmname)
else:
module.exit_json(changed=False, msg="You did not specify a resource type")
else:
module.exit_json(changed=False, msg="VM %s already exists" % vmname)
if state == 'started':
if vm_status(c, vmname) == 'up':
module.exit_json(changed=False, msg="VM %s is already running" % vmname)
else:
vm_start(c, vmname, hostname, ip, netmask, gateway, domain, dns, rootpw, key)
module.exit_json(changed=True, msg="VM %s started" % vmname)
if state == 'shutdown':
if vm_status(c, vmname) == 'down':
module.exit_json(changed=False, msg="VM %s is already shutdown" % vmname)
else:
vm_stop(c, vmname)
module.exit_json(changed=True, msg="VM %s is shutting down" % vmname)
if state == 'restart':
if vm_status(c, vmname) == 'up':
vm_restart(c, vmname)
module.exit_json(changed=True, msg="VM %s is restarted" % vmname)
else:
module.exit_json(changed=False, msg="VM %s is not running" % vmname)
if state == 'absent':
if get_vm(c, vmname) == "empty":
module.exit_json(changed=False, msg="VM %s does not exist" % vmname)
else:
vm_remove(c, vmname)
module.exit_json(changed=True, msg="VM %s removed" % vmname)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
segment-routing/openwrt
|
refs/heads/sr-ipv6-4.4-openwrt
|
tools/perf/scripts/python/check-perf-trace.py
|
1997
|
# perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
|
OrhanOdabasi/weirdbutreal
|
refs/heads/master
|
story/migrations/0024_auto_20170322_0029.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-03-22 00:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('story', '0023_auto_20170321_1329'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='kind',
field=models.CharField(choices=[('Story', 'Story'), ('CommentLike', 'CommentLike'), ('Comment', 'Comment')], max_length=15),
),
]
|
semonte/intellij-community
|
refs/heads/master
|
python/lib/Lib/encodings/mac_turkish.py
|
593
|
""" Python Character Mapping Codec mac_turkish generated from 'MAPPINGS/VENDORS/APPLE/TURKISH.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-turkish',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
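# Usage sketch (assumes the encodings package maps the 'mac-turkish'
# name to this module, as the stdlib does for its codec modules):
#   assert u'\u011f'.encode('mac-turkish') == '\xdb'
#   assert '\xdb'.decode('mac-turkish') == u'\u011f'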
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\xc6' # 0xAE -> LATIN CAPITAL LETTER AE
u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\xa5' # 0xB4 -> YEN SIGN
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\xe6' # 0xBE -> LATIN SMALL LETTER AE
u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\u011e' # 0xDA -> LATIN CAPITAL LETTER G WITH BREVE
u'\u011f' # 0xDB -> LATIN SMALL LETTER G WITH BREVE
u'\u0130' # 0xDC -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'\u0131' # 0xDD -> LATIN SMALL LETTER DOTLESS I
u'\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\u015f' # 0xDF -> LATIN SMALL LETTER S WITH CEDILLA
u'\u2021' # 0xE0 -> DOUBLE DAGGER
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\uf8ff' # 0xF0 -> Apple logo
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\uf8a0' # 0xF5 -> undefined1
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u02d8' # 0xF9 -> BREVE
u'\u02d9' # 0xFA -> DOT ABOVE
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT
u'\u02db' # 0xFE -> OGONEK
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
jkshaver/virtualenv-1.8.2
|
refs/heads/master
|
env/lib/python2.7/site-packages/django/contrib/formtools/tests/wizard/namedwizardtests/forms.py
|
318
|
import os
import tempfile
from django import forms
from django.core.files.storage import FileSystemStorage
from django.forms.formsets import formset_factory
from django.http import HttpResponse
from django.template import Template, Context
from django.contrib.auth.models import User
from django.contrib.formtools.wizard.views import NamedUrlWizardView
temp_storage_location = tempfile.mkdtemp(dir=os.environ.get('DJANGO_TEST_TEMP_DIR'))
temp_storage = FileSystemStorage(location=temp_storage_location)
class Page1(forms.Form):
name = forms.CharField(max_length=100)
user = forms.ModelChoiceField(queryset=User.objects.all())
thirsty = forms.NullBooleanField()
class Page2(forms.Form):
address1 = forms.CharField(max_length=100)
address2 = forms.CharField(max_length=100)
file1 = forms.FileField()
class Page3(forms.Form):
random_crap = forms.CharField(max_length=100)
Page4 = formset_factory(Page3, extra=2)
class ContactWizard(NamedUrlWizardView):
file_storage = temp_storage
def done(self, form_list, **kwargs):
c = Context({
'form_list': [x.cleaned_data for x in form_list],
'all_cleaned_data': self.get_all_cleaned_data()
})
for form in self.form_list.keys():
c[form] = self.get_cleaned_data_for_step(form)
c['this_will_fail'] = self.get_cleaned_data_for_step('this_will_fail')
return HttpResponse(Template('').render(c))
class SessionContactWizard(ContactWizard):
storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'
class CookieContactWizard(ContactWizard):
storage_name = 'django.contrib.formtools.wizard.storage.cookie.CookieStorage'
|
tudorvio/nova
|
refs/heads/master
|
nova/api/openstack/compute/schemas/v3/availability_zone.py
|
73
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.validation import parameter_types
server_create = {
'availability_zone': parameter_types.name,
}
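# Hedged illustration: once merged into the server-create schema by the
# extension framework, this fragment validates a body fragment like
# (values hypothetical):
#   {"server": {"name": "vm-1", "availability_zone": "nova"}}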
|
jonathanunderwood/numpy
|
refs/heads/master
|
doc/newdtype_example/example.py
|
136
|
from __future__ import division, absolute_import, print_function
import floatint.floatint as ff
import numpy as np
# Setting values via np.array() is hard because the parser doesn't
# always stop at tuples, so the setitem code can be called with
# scalars on a wrongly shaped array.
# But we can get a view as an ndarray of the given type:
g = np.array([1, 2, 3, 4, 5, 6, 7, 8]).view(ff.floatint_type)
# Now, the elements will be the scalar type associated
# with the ndarray.
print(g[0])
print(type(g[1]))
# Now, you need to register ufuncs and more arrfuncs to do useful things...
|
dkubiak789/OpenUpgrade
|
refs/heads/8.0
|
addons/mrp/tests/__init__.py
|
140
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_multicompany
checks = [
test_multicompany,
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
rocopartners/django-oscar
|
refs/heads/master
|
src/oscar/management/commands/oscar_import_catalogue_images.py
|
51
|
import logging
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from oscar.core.loading import get_class
Importer = get_class('catalogue.utils', 'Importer')
logger = logging.getLogger('oscar.catalogue.import')
class Command(BaseCommand):
args = '/path/to/folder'
help = 'For importing product images from a folder'
option_list = BaseCommand.option_list + (
make_option('--filename',
dest='filename',
default='upc',
help='Product field to lookup from image filename'),
)
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError('Command requires a path to a single folder')
logger.info("Starting image import")
dirname = args[0]
importer = Importer(logger, field=options.get('filename'))
importer.handle(dirname)
|
sametmax/Django--an-app-at-a-time
|
refs/heads/master
|
ignore_this_directory/django/contrib/contenttypes/migrations/0001_initial.py
|
113
|
import django.contrib.contenttypes.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='ContentType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('app_label', models.CharField(max_length=100)),
('model', models.CharField(max_length=100, verbose_name='python model class name')),
],
options={
'ordering': ('name',),
'db_table': 'django_content_type',
'verbose_name': 'content type',
'verbose_name_plural': 'content types',
},
bases=(models.Model,),
managers=[
('objects', django.contrib.contenttypes.models.ContentTypeManager()),
],
),
migrations.AlterUniqueTogether(
name='contenttype',
unique_together={('app_label', 'model')},
),
]
|
suyashphadtare/sajil-frappe
|
refs/heads/develop
|
frappe/core/doctype/notification_count/notification_count.py
|
23
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import MySQLdb
from frappe.model.document import Document
logger = frappe.get_logger()
class NotificationCount(Document):
pass
@frappe.whitelist()
def get_notifications():
if frappe.flags.in_install_app:
return
config = get_notification_config()
can_read = frappe.user.get_can_read()
open_count_doctype = {}
open_count_module = {}
notification_count = dict(frappe.db.sql("""select for_doctype, open_count
from `tabNotification Count` where owner=%s""", (frappe.session.user,)))
for d in config.for_doctype:
if d in can_read:
condition = config.for_doctype[d]
key = condition.keys()[0]
if d in notification_count:
open_count_doctype[d] = notification_count[d]
else:
result = frappe.get_list(d, fields=["count(*)"],
filters=[[d, key, "=", condition[key]]], as_list=True)[0][0]
open_count_doctype[d] = result
try:
frappe.get_doc({"doctype":"Notification Count", "for_doctype":d,
"open_count":result}).insert(ignore_permissions=True)
except MySQLdb.OperationalError, e:
if e.args[0] != 1213:
raise
logger.error("Deadlock")
for m in config.for_module:
if m in notification_count:
open_count_module[m] = notification_count[m]
else:
open_count_module[m] = frappe.get_attr(config.for_module[m])()
try:
frappe.get_doc({"doctype":"Notification Count", "for_doctype":m,
"open_count":open_count_module[m]}).insert(ignore_permissions=True)
except MySQLdb.OperationalError, e:
if e.args[0] != 1213:
raise
logger.error("Deadlock")
return {
"open_count_doctype": open_count_doctype,
"open_count_module": open_count_module
}
def clear_notifications(user=None):
if frappe.flags.in_install_app=="frappe":
return
try:
if user:
frappe.db.sql("""delete from `tabNotification Count` where owner=%s""", (user,))
else:
frappe.db.sql("""delete from `tabNotification Count`""")
except MySQLdb.OperationalError, e:
if e.args[0] != 1213:
raise
logger.error("Deadlock")
def delete_notification_count_for(doctype):
if frappe.flags.in_import: return
try:
frappe.db.sql("""delete from `tabNotification Count` where for_doctype = %s""", (doctype,))
except MySQLdb.OperationalError, e:
if e.args[0] != 1213:
raise
logger.error("Deadlock")
def clear_doctype_notifications(doc, method=None, *args, **kwargs):
if frappe.flags.in_import:
return
config = get_notification_config()
doctype = doc.doctype
if doctype in config.for_doctype:
delete_notification_count_for(doctype)
return
if doctype in config.for_module_doctypes:
delete_notification_count_for(config.for_module_doctypes[doctype])
def get_notification_info_for_boot():
out = get_notifications()
config = get_notification_config()
can_read = frappe.user.get_can_read()
conditions = {}
module_doctypes = {}
doctype_info = dict(frappe.db.sql("""select name, module from tabDocType"""))
for d in list(set(can_read + config.for_doctype.keys())):
if d in config.for_doctype:
conditions[d] = config.for_doctype[d]
if d in doctype_info:
module_doctypes.setdefault(doctype_info[d], []).append(d)
out.update({
"conditions": conditions,
"module_doctypes": module_doctypes,
})
return out
def get_notification_config():
config = frappe._dict()
for notification_config in frappe.get_hooks().notification_config:
nc = frappe.get_attr(notification_config)()
for key in ("for_doctype", "for_module", "for_module_doctypes"):
config.setdefault(key, {})
config[key].update(nc.get(key, {}))
return config
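# Hedged sketch of the dict shape a notification_config hook returns,
# matching how the keys are consumed above (doctype/module names here
# are hypothetical):
#   {
#       "for_doctype": {"ToDo": {"status": "Open"}},
#       "for_module": {"Core": "path.to.count_function"},
#       "for_module_doctypes": {"Comment": "Messages"},
#   }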
def on_doctype_update():
if not frappe.db.sql("""show index from `tabNotification Count`
where Key_name="notification_count_owner_index" """):
frappe.db.commit()
frappe.db.sql("""alter table `tabNotification Count`
add index notification_count_owner_index(owner)""")
|
uahic/nest-simulator
|
refs/heads/master
|
examples/nest/plot_tsodyks_shortterm_bursts.py
|
13
|
# -*- coding: utf-8 -*-
#
# plot_tsodyks_shortterm_bursts.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import pylab
from nest import raster_plot
raster_plot.from_file("spike_detector-503-0.gdf",hist=True)
pylab.show()
|
joeyjojo/django_offline
|
refs/heads/master
|
src/django/contrib/staticfiles/utils.py
|
322
|
import os
import fnmatch
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
def matches_patterns(path, patterns=None):
"""
Return True or False depending on whether the ``path`` should be
ignored (if it matches any pattern in ``ignore_patterns``).
"""
if patterns is None:
patterns = []
for pattern in patterns:
if fnmatch.fnmatchcase(path, pattern):
return True
return False
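# Hedged usage sketch: the patterns are fnmatch-style globs, so e.g.
#   matches_patterns('css/base.css', ['*.css'])  # -> True
#   matches_patterns('js/app.js', ['*.css'])     # -> False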
def get_files(storage, ignore_patterns=None, location=''):
"""
Recursively walk the storage directories yielding the paths
of all files that should be copied.
"""
if ignore_patterns is None:
ignore_patterns = []
directories, files = storage.listdir(location)
for fn in files:
if matches_patterns(fn, ignore_patterns):
continue
if location:
fn = os.path.join(location, fn)
yield fn
for dir in directories:
if matches_patterns(dir, ignore_patterns):
continue
if location:
dir = os.path.join(location, dir)
for fn in get_files(storage, ignore_patterns, dir):
yield fn
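# Hedged usage sketch (the storage instance and patterns are hypothetical):
#   from django.core.files.storage import FileSystemStorage
#   storage = FileSystemStorage(location='/srv/static')
#   for path in get_files(storage, ignore_patterns=['CVS', '.*', '*~']):
#       print path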
def check_settings(base_url=None):
"""
Checks if the staticfiles settings have sane values.
"""
if base_url is None:
base_url = settings.STATIC_URL
if not base_url:
raise ImproperlyConfigured(
"You're using the staticfiles app "
"without having set the required STATIC_URL setting.")
if settings.MEDIA_URL == base_url:
raise ImproperlyConfigured("The MEDIA_URL and STATIC_URL "
"settings must have different values")
if ((settings.MEDIA_ROOT and settings.STATIC_ROOT) and
(settings.MEDIA_ROOT == settings.STATIC_ROOT)):
raise ImproperlyConfigured("The MEDIA_ROOT and STATIC_ROOT "
"settings must have different values")
|
mgagne/nova
|
refs/heads/master
|
nova/tests/unit/console/test_console.py
|
11
|
# Copyright (c) 2010 OpenStack Foundation
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests For Console proxy."""
from oslo.config import cfg
from oslo.utils import importutils
from nova.compute import rpcapi as compute_rpcapi
from nova.console import api as console_api
from nova.console import rpcapi as console_rpcapi
from nova import context
from nova import db
from nova import exception
from nova import test
CONF = cfg.CONF
CONF.import_opt('console_manager', 'nova.service')
CONF.import_opt('console_driver', 'nova.console.manager')
class ConsoleTestCase(test.TestCase):
"""Test case for console proxy manager."""
def setUp(self):
super(ConsoleTestCase, self).setUp()
self.flags(console_driver='nova.console.fake.FakeConsoleProxy',
stub_compute=True)
self.console = importutils.import_object(CONF.console_manager)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.host = 'test_compute_host'
def _create_instance(self):
"""Create a test instance."""
inst = {}
inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['instance_type_id'] = 1
inst['ami_launch_index'] = 0
return db.instance_create(self.context, inst)
def test_get_pool_for_instance_host(self):
pool = self.console._get_pool_for_instance_host(self.context,
self.host)
self.assertEqual(pool['compute_host'], self.host)
def test_get_pool_creates_new_pool_if_needed(self):
self.assertRaises(exception.NotFound,
db.console_pool_get_by_host_type,
self.context,
self.host,
self.console.host,
self.console.driver.console_type)
pool = self.console._get_pool_for_instance_host(self.context,
self.host)
pool2 = db.console_pool_get_by_host_type(self.context,
self.host,
self.console.host,
self.console.driver.console_type)
self.assertEqual(pool['id'], pool2['id'])
def test_get_pool_does_not_create_new_pool_if_exists(self):
pool_info = {'address': '127.0.0.1',
'username': 'test',
'password': '1234pass',
'host': self.console.host,
'console_type': self.console.driver.console_type,
'compute_host': 'sometesthostname'}
new_pool = db.console_pool_create(self.context, pool_info)
pool = self.console._get_pool_for_instance_host(self.context,
'sometesthostname')
self.assertEqual(pool['id'], new_pool['id'])
def test_add_console(self):
instance = self._create_instance()
self.console.add_console(self.context, instance['id'])
instance = db.instance_get(self.context, instance['id'])
pool = db.console_pool_get_by_host_type(self.context,
instance['host'], self.console.host,
self.console.driver.console_type)
console_instances = [con['instance_uuid'] for con in pool['consoles']]
self.assertIn(instance['uuid'], console_instances)
db.instance_destroy(self.context, instance['uuid'])
def test_add_console_does_not_duplicate(self):
instance = self._create_instance()
cons1 = self.console.add_console(self.context, instance['id'])
cons2 = self.console.add_console(self.context, instance['id'])
self.assertEqual(cons1, cons2)
db.instance_destroy(self.context, instance['uuid'])
def test_remove_console(self):
instance = self._create_instance()
console_id = self.console.add_console(self.context, instance['id'])
self.console.remove_console(self.context, console_id)
self.assertRaises(exception.NotFound,
db.console_get,
self.context,
console_id)
db.instance_destroy(self.context, instance['uuid'])
class ConsoleAPITestCase(test.TestCase):
"""Test case for console API."""
def setUp(self):
super(ConsoleAPITestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.console_api = console_api.API()
self.fake_uuid = '00000000-aaaa-bbbb-cccc-000000000000'
self.fake_instance = {
'id': 1,
'uuid': self.fake_uuid,
'host': 'fake_host'
}
self.fake_console = {
'pool': {'host': 'fake_host'},
'id': 'fake_id'
}
def _fake_db_console_get(_ctxt, _console_uuid, _instance_uuid):
return self.fake_console
self.stubs.Set(db, 'console_get', _fake_db_console_get)
def _fake_db_console_get_all_by_instance(_ctxt, _instance_uuid,
columns_to_join):
return [self.fake_console]
self.stubs.Set(db, 'console_get_all_by_instance',
_fake_db_console_get_all_by_instance)
def _fake_instance_get_by_uuid(_ctxt, _instance_uuid):
return self.fake_instance
self.stubs.Set(db, 'instance_get_by_uuid', _fake_instance_get_by_uuid)
def test_get_consoles(self):
console = self.console_api.get_consoles(self.context, self.fake_uuid)
self.assertEqual(console, [self.fake_console])
def test_get_console(self):
console = self.console_api.get_console(self.context, self.fake_uuid,
'fake_id')
self.assertEqual(console, self.fake_console)
def test_delete_console(self):
self.mox.StubOutWithMock(console_rpcapi.ConsoleAPI, 'remove_console')
console_rpcapi.ConsoleAPI.remove_console(self.context, 'fake_id')
self.mox.ReplayAll()
self.console_api.delete_console(self.context, self.fake_uuid,
'fake_id')
def test_create_console(self):
self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI,
'get_console_topic')
compute_rpcapi.ComputeAPI.get_console_topic(
self.context, 'fake_host').AndReturn('compute.fake_host')
self.mox.StubOutClassWithMocks(console_rpcapi, 'ConsoleAPI')
console_api_mock = console_rpcapi.ConsoleAPI(
topic='compute', server='fake_host')
console_api_mock.add_console(self.context,
self.fake_instance['id'])
self.mox.ReplayAll()
self.console_api.create_console(self.context, self.fake_uuid)
|
liamgh/liamgreenhughes-sl4a-tf101
|
refs/heads/master
|
python/gdata/samples/oauth/oauth_on_appengine/main_rsa.py
|
126
|
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'e.bidelman (Eric Bidelman)'
import cgi
import os
import gdata.auth
import gdata.docs
import gdata.docs.service
import gdata.alt.appengine
from appengine_utilities.sessions import Session
from django.utils import simplejson
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
SETTINGS = {
'APP_NAME': 'google-GDataOAuthAppEngine-v1',
'CONSUMER_KEY': 'YOUR_CONSUMER_KEY',
'SIG_METHOD': gdata.auth.OAuthSignatureMethod.RSA_SHA1,
'SCOPES': ['http://docs.google.com/feeds/',
'https://docs.google.com/feeds/']
}
f = open('/path/to/your/rsa_private_key.pem')
RSA_KEY = f.read()
f.close()
gdocs = gdata.docs.service.DocsService(source=SETTINGS['APP_NAME'])
gdocs.SetOAuthInputParameters(SETTINGS['SIG_METHOD'], SETTINGS['CONSUMER_KEY'],
rsa_key=RSA_KEY)
gdata.alt.appengine.run_on_appengine(gdocs)
class MainPage(webapp.RequestHandler):
"""Main page displayed to user."""
# GET /
def get(self):
if not users.get_current_user():
self.redirect(users.create_login_url(self.request.uri))
access_token = gdocs.token_store.find_token('%20'.join(SETTINGS['SCOPES']))
if isinstance(access_token, gdata.auth.OAuthToken):
form_action = '/fetch_data'
form_value = 'Now fetch my docs!'
revoke_token_link = True
else:
form_action = '/get_oauth_token'
form_value = 'Give this website access to my Google Docs'
revoke_token_link = None
template_values = {
'form_action': form_action,
'form_value': form_value,
'user': users.get_current_user(),
'revoke_token_link': revoke_token_link,
'oauth_token': access_token,
'consumer': gdocs.GetOAuthInputParameters().GetConsumer(),
'sig_method': gdocs.GetOAuthInputParameters().GetSignatureMethod().get_name()
}
path = os.path.join(os.path.dirname(__file__), 'index.html')
self.response.out.write(template.render(path, template_values))
class OAuthDance(webapp.RequestHandler):
"""Handler for the 3 legged OAuth dance, v1.0a."""
"""This handler is responsible for fetching an initial OAuth request token,
redirecting the user to the approval page. When the user grants access, they
will be redirected back to this GET handler and their authorized request token
will be exchanged for a long-lived access token."""
# GET /get_oauth_token
def get(self):
"""Invoked after we're redirected back from the approval page."""
self.session = Session()
oauth_token = gdata.auth.OAuthTokenFromUrl(self.request.uri)
if oauth_token:
oauth_token.oauth_input_params = gdocs.GetOAuthInputParameters()
gdocs.SetOAuthToken(oauth_token)
# 3.) Exchange the authorized request token for an access token
oauth_verifier = self.request.get('oauth_verifier', default_value='')
access_token = gdocs.UpgradeToOAuthAccessToken(
oauth_verifier=oauth_verifier)
# Remember the access token in the current user's token store
if access_token and users.get_current_user():
gdocs.token_store.add_token(access_token)
elif access_token:
gdocs.current_token = access_token
gdocs.SetOAuthToken(access_token)
self.redirect('/')
# POST /get_oauth_token
def post(self):
"""Fetches a request token and redirects the user to the approval page."""
self.session = Session()
if users.get_current_user():
# 1.) REQUEST TOKEN STEP. Provide the data scope(s) and the page we'll
# be redirected back to after the user grants access on the approval page.
req_token = gdocs.FetchOAuthRequestToken(
scopes=SETTINGS['SCOPES'], oauth_callback=self.request.uri)
    # Generate the URL to redirect the user to. Add the hd parameter for a
# better user experience. Leaving it off will give the user the choice
# of what account (Google vs. Google Apps) to login with.
domain = self.request.get('domain', default_value='default')
approval_page_url = gdocs.GenerateOAuthAuthorizationURL(
extra_params={'hd': domain})
# 2.) APPROVAL STEP. Redirect to user to Google's OAuth approval page.
self.redirect(approval_page_url)
class FetchData(OAuthDance):
"""Fetches the user's data."""
"""This class inherits from OAuthDance in order to utilize OAuthDance.post()
in case of a request error (e.g. the user has a bad token)."""
# GET /fetch_data
def get(self):
self.redirect('/')
# POST /fetch_data
def post(self):
"""Fetches the user's data."""
try:
feed = gdocs.GetDocumentListFeed()
json = []
for entry in feed.entry:
if entry.lastModifiedBy is not None:
last_modified_by = entry.lastModifiedBy.email.text
else:
last_modified_by = ''
if entry.lastViewed is not None:
last_viewed = entry.lastViewed.text
else:
last_viewed = ''
json.append({'title': entry.title.text,
'links': {'alternate': entry.GetHtmlLink().href},
'published': entry.published.text,
'updated': entry.updated.text,
'resourceId': entry.resourceId.text,
'type': entry.GetDocumentType(),
'lastModifiedBy': last_modified_by,
'lastViewed': last_viewed
})
self.response.out.write(simplejson.dumps(json))
except gdata.service.RequestError, error:
OAuthDance.post(self)
class RevokeToken(webapp.RequestHandler):
# GET /revoke_token
def get(self):
"""Revokes the current user's OAuth access token."""
try:
gdocs.RevokeOAuthToken()
except gdata.service.RevokingOAuthTokenFailed:
pass
gdocs.token_store.remove_all_tokens()
self.redirect('/')
def main():
application = webapp.WSGIApplication([('/', MainPage),
('/get_oauth_token', OAuthDance),
('/fetch_data', FetchData),
('/revoke_token', RevokeToken)],
debug=True)
run_wsgi_app(application)
|
bhargav2408/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/test/test_marshal.py
|
48
|
#!/usr/bin/env python3
from test import support
import marshal
import sys
import unittest
import os
class HelperMixin:
def helper(self, sample, *extra):
new = marshal.loads(marshal.dumps(sample, *extra))
self.assertEqual(sample, new)
try:
with open(support.TESTFN, "wb") as f:
marshal.dump(sample, f, *extra)
with open(support.TESTFN, "rb") as f:
new = marshal.load(f)
self.assertEqual(sample, new)
finally:
support.unlink(support.TESTFN)
class IntTestCase(unittest.TestCase, HelperMixin):
def test_ints(self):
# Test the full range of Python ints.
n = sys.maxsize
while n:
for expected in (-n, n):
self.helper(expected)
n = n >> 1
def test_int64(self):
# Simulate int marshaling on a 64-bit box. This is most interesting if
# we're running the test on a 32-bit box, of course.
def to_little_endian_string(value, nbytes):
b = bytearray()
for i in range(nbytes):
b.append(value & 0xff)
value >>= 8
return b
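        # Sanity sketch: to_little_endian_string(0x0102, 2) == bytearray(b'\x02\x01')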
maxint64 = (1 << 63) - 1
minint64 = -maxint64-1
for base in maxint64, minint64, -maxint64, -(minint64 >> 1):
while base:
s = b'I' + to_little_endian_string(base, 8)
got = marshal.loads(s)
self.assertEqual(base, got)
if base == -1: # a fixed-point for shifting right 1
base = 0
else:
base >>= 1
def test_bool(self):
for b in (True, False):
self.helper(b)
class FloatTestCase(unittest.TestCase, HelperMixin):
def test_floats(self):
# Test a few floats
small = 1e-25
n = sys.maxsize * 3.7e250
while n > small:
for expected in (-n, n):
self.helper(float(expected))
n /= 123.4567
f = 0.0
s = marshal.dumps(f, 2)
got = marshal.loads(s)
self.assertEqual(f, got)
# and with version <= 1 (floats marshalled differently then)
s = marshal.dumps(f, 1)
got = marshal.loads(s)
self.assertEqual(f, got)
n = sys.maxsize * 3.7e-250
while n < small:
for expected in (-n, n):
f = float(expected)
self.helper(f)
self.helper(f, 1)
n *= 123.4567
class StringTestCase(unittest.TestCase, HelperMixin):
def test_unicode(self):
for s in ["", "Andr\xe8 Previn", "abc", " "*10000]:
self.helper(marshal.loads(marshal.dumps(s)))
def test_string(self):
for s in ["", "Andr\xe8 Previn", "abc", " "*10000]:
self.helper(s)
def test_bytes(self):
for s in [b"", b"Andr\xe8 Previn", b"abc", b" "*10000]:
self.helper(s)
class ExceptionTestCase(unittest.TestCase):
def test_exceptions(self):
new = marshal.loads(marshal.dumps(StopIteration))
self.assertEqual(StopIteration, new)
class CodeTestCase(unittest.TestCase):
def test_code(self):
co = ExceptionTestCase.test_exceptions.__code__
new = marshal.loads(marshal.dumps(co))
self.assertEqual(co, new)
def test_many_codeobjects(self):
# Issue2957: bad recursion count on code objects
count = 5000 # more than MAX_MARSHAL_STACK_DEPTH
codes = (ExceptionTestCase.test_exceptions.__code__,) * count
marshal.loads(marshal.dumps(codes))
class ContainerTestCase(unittest.TestCase, HelperMixin):
d = {'astring': 'foo@bar.baz.spam',
'afloat': 7283.43,
'anint': 2**20,
'ashortlong': 2,
'alist': ['.zyx.41'],
'atuple': ('.zyx.41',)*10,
'aboolean': False,
'aunicode': "Andr\xe8 Previn"
}
def test_dict(self):
self.helper(self.d)
def test_list(self):
self.helper(list(self.d.items()))
def test_tuple(self):
self.helper(tuple(self.d.keys()))
def test_sets(self):
for constructor in (set, frozenset):
self.helper(constructor(self.d.keys()))
class BugsTestCase(unittest.TestCase):
def test_bug_5888452(self):
# Simple-minded check for SF 588452: Debug build crashes
marshal.dumps([128] * 1000)
def test_patch_873224(self):
self.assertRaises(Exception, marshal.loads, '0')
self.assertRaises(Exception, marshal.loads, 'f')
self.assertRaises(Exception, marshal.loads, marshal.dumps(2**65)[:-1])
def test_version_argument(self):
# Python 2.4.0 crashes for any call to marshal.dumps(x, y)
self.assertEqual(marshal.loads(marshal.dumps(5, 0)), 5)
self.assertEqual(marshal.loads(marshal.dumps(5, 1)), 5)
def test_fuzz(self):
# simple test that it's at least not *totally* trivial to
# crash from bad marshal data
for c in [chr(i) for i in range(256)]:
try:
marshal.loads(c)
except Exception:
pass
def test_loads_recursion(self):
s = 'c' + ('X' * 4*4) + '{' * 2**20
self.assertRaises(ValueError, marshal.loads, s)
def test_recursion_limit(self):
# Create a deeply nested structure.
head = last = []
# The max stack depth should match the value in Python/marshal.c.
if os.name == 'nt' and hasattr(sys, 'gettotalrefcount'):
MAX_MARSHAL_STACK_DEPTH = 1500
else:
MAX_MARSHAL_STACK_DEPTH = 2000
for i in range(MAX_MARSHAL_STACK_DEPTH - 2):
last.append([0])
last = last[-1]
# Verify we don't blow out the stack with dumps/load.
data = marshal.dumps(head)
new_head = marshal.loads(data)
# Don't use == to compare objects, it can exceed the recursion limit.
self.assertEqual(len(new_head), len(head))
self.assertEqual(len(new_head[0]), len(head[0]))
self.assertEqual(len(new_head[-1]), len(head[-1]))
last.append([0])
self.assertRaises(ValueError, marshal.dumps, head)
def test_exact_type_match(self):
# Former bug:
# >>> class Int(int): pass
# >>> type(loads(dumps(Int())))
# <type 'int'>
for typ in (int, float, complex, tuple, list, dict, set, frozenset):
# Note: str subclasses are not tested because they get handled
# by marshal's routines for objects supporting the buffer API.
subtyp = type('subtyp', (typ,), {})
self.assertRaises(ValueError, marshal.dumps, subtyp())
# Issue #1792 introduced a change in how marshal increases the size of its
# internal buffer; this test ensures that the new code is exercised.
def test_large_marshal(self):
size = int(1e6)
testString = 'abc' * size
marshal.dumps(testString)
def test_invalid_longs(self):
# Issue #7019: marshal.loads shouldn't produce unnormalized PyLongs
invalid_string = b'l\x02\x00\x00\x00\x00\x00\x00\x00'
self.assertRaises(ValueError, marshal.loads, invalid_string)
def test_multiple_dumps_and_loads(self):
# Issue 12291: marshal.load() should be callable multiple times
# with interleaved data written by non-marshal code
# Adapted from a patch by Engelbert Gruber.
data = (1, 'abc', b'def', 1.0, (2, 'a', ['b', b'c']))
for interleaved in (b'', b'0123'):
ilen = len(interleaved)
positions = []
try:
with open(support.TESTFN, 'wb') as f:
for d in data:
marshal.dump(d, f)
if ilen:
f.write(interleaved)
positions.append(f.tell())
with open(support.TESTFN, 'rb') as f:
for i, d in enumerate(data):
self.assertEqual(d, marshal.load(f))
if ilen:
f.read(ilen)
self.assertEqual(positions[i], f.tell())
finally:
support.unlink(support.TESTFN)
def test_main():
support.run_unittest(IntTestCase,
FloatTestCase,
StringTestCase,
CodeTestCase,
ContainerTestCase,
ExceptionTestCase,
BugsTestCase)
if __name__ == "__main__":
test_main()
|
onecloud/neutron
|
refs/heads/master
|
neutron/tests/unit/oneconvergence/test_nvsd_plugin.py
|
6
|
# Copyright 2014 OneConvergence, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Test Library for OneConvergencePlugin."""
import contextlib
import uuid
import mock
from oslo.config import cfg
from neutron import context
from neutron.extensions import portbindings
from neutron import manager
from neutron.plugins.oneconvergence import plugin as nvsd_plugin
from neutron.tests.unit import _test_extension_portbindings as test_bindings
from neutron.tests.unit import test_db_plugin as test_plugin
from neutron.tests.unit import test_l3_plugin
PLUGIN_NAME = 'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2'
class OneConvergencePluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
_plugin_name = PLUGIN_NAME
def setUp(self):
def mocked_oneconvergence_init(self):
def side_effect(*args, **kwargs):
return {'id': str(uuid.uuid4())}
self.nvsdlib = mock.Mock()
self.nvsdlib.create_network.side_effect = side_effect
with mock.patch.object(nvsd_plugin.OneConvergencePluginV2,
'oneconvergence_init',
new=mocked_oneconvergence_init):
super(OneConvergencePluginV2TestCase,
self).setUp(self._plugin_name)
class TestOneConvergencePluginNetworksV2(test_plugin.TestNetworksV2,
OneConvergencePluginV2TestCase):
pass
class TestOneConvergencePluginSubnetsV2(test_plugin.TestSubnetsV2,
OneConvergencePluginV2TestCase):
def test_update_subnet_inconsistent_ipv6_gatewayv4(self):
self.skipTest("NVSD Plugin does not support IPV6.")
def test_create_subnet_with_v6_allocation_pool(self):
self.skipTest("NVSD Plugin does not support IPV6.")
def test_update_subnet_inconsistent_ipv6_hostroute_dst_v4(self):
self.skipTest("NVSD Plugin does not support IPV6.")
def test_update_subnet_inconsistent_ipv6_hostroute_np_v4(self):
self.skipTest("NVSD Plugin does not support IPV6.")
def test_create_subnet_ipv6_attributes(self):
self.skipTest("NVSD Plugin does not support IPV6.")
def test_create_subnet_ipv6_single_attribute_set(self):
self.skipTest("NVSD Plugin does not support IPV6.")
def test_update_subnet_ipv6_attributes(self):
self.skipTest("NVSD Plugin does not support IPV6.")
def test_update_subnet_ipv6_inconsistent_enable_dhcp(self):
self.skipTest("NVSD Plugin does not support IPV6.")
def test_update_subnet_ipv6_inconsistent_ra_attribute(self):
self.skipTest("NVSD Plugin does not support IPV6.")
def test_update_subnet_ipv6_inconsistent_address_attribute(self):
self.skipTest("NVSD Plugin does not support IPV6.")
class TestOneConvergencePluginPortsV2(test_plugin.TestPortsV2,
test_bindings.PortBindingsTestCase,
OneConvergencePluginV2TestCase):
VIF_TYPE = portbindings.VIF_TYPE_OVS
def test_requested_subnet_id_v4_and_v6(self):
self.skipTest("NVSD Plugin does not support IPV6.")
def test_port_vif_details(self):
plugin = manager.NeutronManager.get_plugin()
with self.port(name='name') as port1:
ctx = context.get_admin_context()
port = plugin.get_port(ctx, port1['port']['id'])
self.assertEqual(port['binding:vif_type'],
portbindings.VIF_TYPE_OVS)
def test_ports_vif_details(self):
cfg.CONF.set_default('allow_overlapping_ips', True)
plugin = manager.NeutronManager.get_plugin()
with contextlib.nested(self.port(), self.port()) as (port1, port2):
ctx = context.get_admin_context()
ports = plugin.get_ports(ctx)
self.assertEqual(len(ports), 2)
for port in ports:
self.assertEqual(port['binding:vif_type'],
portbindings.VIF_TYPE_OVS)
def test_ip_allocation_for_ipv6_subnet_slaac_adddress_mode(self):
self.skipTest("NVSD Plugin does not support IPV6.")
class TestOneConvergenceBasicGet(test_plugin.TestBasicGet,
OneConvergencePluginV2TestCase):
pass
class TestOneConvergenceV2HTTPResponse(test_plugin.TestV2HTTPResponse,
OneConvergencePluginV2TestCase):
pass
class TestOneConvergenceL3NatTestCase(test_l3_plugin.L3NatDBIntTestCase):
_plugin_name = PLUGIN_NAME
def setUp(self):
def mocked_oneconvergence_init(self):
def side_effect(*args, **kwargs):
return {'id': str(uuid.uuid4())}
self.nvsdlib = mock.Mock()
self.nvsdlib.create_network.side_effect = side_effect
ext_mgr = test_l3_plugin.L3TestExtensionManager()
with mock.patch.object(nvsd_plugin.OneConvergencePluginV2,
'oneconvergence_init',
new=mocked_oneconvergence_init):
super(TestOneConvergenceL3NatTestCase,
self).setUp(plugin=self._plugin_name, ext_mgr=ext_mgr)
def test_floatingip_with_invalid_create_port(self):
self._test_floatingip_with_invalid_create_port(self._plugin_name)
|
rolando-contrib/scrapy
|
refs/heads/master
|
scrapy/linkextractors/htmlparser.py
|
17
|
"""
HTMLParser-based link extractor
"""
import warnings
import six
from six.moves.html_parser import HTMLParser
from six.moves.urllib.parse import urljoin
from w3lib.url import safe_url_string
from w3lib.html import strip_html5_whitespace
from scrapy.link import Link
from scrapy.utils.python import unique as unique_list
from scrapy.exceptions import ScrapyDeprecationWarning
class HtmlParserLinkExtractor(HTMLParser):
def __init__(self, tag="a", attr="href", process=None, unique=False,
strip=True):
HTMLParser.__init__(self)
warnings.warn(
"HtmlParserLinkExtractor is deprecated and will be removed in "
"future releases. Please use scrapy.linkextractors.LinkExtractor",
ScrapyDeprecationWarning, stacklevel=2,
)
self.scan_tag = tag if callable(tag) else lambda t: t == tag
self.scan_attr = attr if callable(attr) else lambda a: a == attr
self.process_attr = process if callable(process) else lambda v: v
self.unique = unique
self.strip = strip
def _extract_links(self, response_text, response_url, response_encoding):
self.reset()
self.feed(response_text)
self.close()
links = unique_list(self.links, key=lambda link: link.url) if self.unique else self.links
ret = []
base_url = urljoin(response_url, self.base_url) if self.base_url else response_url
for link in links:
if isinstance(link.url, six.text_type):
link.url = link.url.encode(response_encoding)
try:
link.url = urljoin(base_url, link.url)
except ValueError:
continue
link.url = safe_url_string(link.url, response_encoding)
link.text = link.text.decode(response_encoding)
ret.append(link)
return ret
def extract_links(self, response):
# wrapper needed to allow to work directly with text
return self._extract_links(response.body, response.url, response.encoding)
def reset(self):
HTMLParser.reset(self)
self.base_url = None
self.current_link = None
self.links = []
def handle_starttag(self, tag, attrs):
if tag == 'base':
self.base_url = dict(attrs).get('href')
if self.scan_tag(tag):
for attr, value in attrs:
if self.scan_attr(attr):
if self.strip:
value = strip_html5_whitespace(value)
url = self.process_attr(value)
link = Link(url=url)
self.links.append(link)
self.current_link = link
def handle_endtag(self, tag):
if self.scan_tag(tag):
self.current_link = None
def handle_data(self, data):
if self.current_link:
self.current_link.text = self.current_link.text + data
def matches(self, url):
"""This extractor matches with any url, since
it doesn't contain any patterns"""
return True
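# Hedged usage sketch (illustrative only, not part of this module; names
# below are hypothetical):
#
#   from scrapy.http import HtmlResponse
#   response = HtmlResponse(url="http://example.com/",
#                           body=b'<a href="/page">Page</a>')
#   links = HtmlParserLinkExtractor(unique=True).extract_links(response)
#   # -> [Link(url='http://example.com/page', text='Page')]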
|
blueset/ehForwarderBot
|
refs/heads/master
|
docs/conf.py
|
1
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# EH Forwarder Bot documentation build configuration file, created by
# sphinx-quickstart on Tue Feb 28 10:17:32 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from os import path
from typing import Sequence
import sphinxcontrib.plantuml
from docutils import nodes
from docutils.utils.smartquotes import smartchars
from sphinx import addnodes
from sphinx.locale import get_translation
sys.path.insert(0, os.path.abspath('..'))
__version__ = "0.0.0"
exec(open('../ehforwarderbot/__version__.py').read())
MESSAGE_CATALOG_NAME = "efb_docs_config"
_ = get_translation(MESSAGE_CATALOG_NAME)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
'sphinx.ext.autosummary',
'sphinx.ext.graphviz',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.autosectionlabel',
'sphinx.ext.intersphinx',
'sphinx_autodoc_typehints',
'sphinxcontrib.restbuilder',
'sphinxcontrib.plantuml',
'sphinx_search.extension']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'EH Forwarder Bot'
copyright = _('2016 — 2020 Eana Hufwe and the EH Forwarder Bot contributors')
author = _('Eana Hufwe, and the EH Forwarder Bot contributors')
docs_title = _('EH Forwarder Bot Documentation')
description = _('An extensible message tunneling chat bot framework.')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ".".join(__version__.split(".")[:2])
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
smartquotes = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_static_path = ['_static']
html_css_files = ["styles/style.css"]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# html_theme_path = [sphinx_readable_theme.get_html_theme_path()]
# html_theme = 'readable'
# html_logo = "_static/logo.png"
html_theme = 'alabaster'
html_sidebars = {
'index': [
'about_index.html',
'navigation.html',
'searchbox.html',
'donate.html',
'translate.html',
],
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
'translate.html',
]
}
html_theme_options = {
'logo': 'logo.png',
'logo_name': True,
'logo_text_align': 'left; font-size: 1.5em',
'touch_icon': 'logo.png',
'github_button': True,
'github_type': 'star',
'github_user': 'blueset',
'github_repo': 'ehforwarderbot',
'description': description,
'donate_url': 'https://github.com/blueset/.github',
'github_banner': "github_banner.svg",
'show_related': True,
'show_relbars': True,
'extra_nav_links': {
_('Community wiki'): 'https://efb.1a23.studio/wiki',
_('Modules repository'): 'https://efb-modules.1a23.studio/',
}
}
html_last_updated_fmt = ""
# import sphinx_py3doc_enhanced_theme
# html_theme = "sphinx_py3doc_enhanced_theme"
# html_theme_path = [sphinx_py3doc_enhanced_theme.get_html_theme_path()]
# sys.path.append(os.path.abspath('_themes'))
# html_theme_path = ['_themes']
# html_theme = 'kr'
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ehForwarderBotDoc'
# -- Options for LaTeX output ---------------------------------------------
latex_engine = "xelatex"
latex_logo = "_static/logo.png"
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
'preamble': r'\usepackage{unicode-math}',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ehForwarderBot.tex', docs_title, author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ehforwarderbot', docs_title, author, 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ehForwarderBot', docs_title,
author, 'ehForwarderBot', description,
'Miscellaneous'),
]
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = False
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = True
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# Sphinx-intl settings
locale_dirs = ['locale/']
gettext_uuid = True
gettext_compact = False
gettext_additional_targets = ['literal-block', 'image']
conversion = {
'az': 'az_AZ', 'es': 'es_VE', 'id': 'id_ID',
'it': 'it_IT', 'ja': 'ja_JP', 'ms': 'ms_MY',
'ro': 'ro_RO', 'tr': 'tr_TR', 'zh': 'zh_CN',
'en': 'en_US'
}
# # Locale fallback settings
# def locale_fallback_decorator(fun):
#
# def wrapper(self, **kwargs):
# self.config.language = conversion.get(self.config.language, self.config.language)
# return fun(self, **kwargs)
# return wrapper
#
#
# sphinx.application.Sphinx._init_i18n = locale_fallback_decorator(sphinx.application.Sphinx._init_i18n)
autosectionlabel_prefix_document = True
# This config value contains the locations and names of other projects that
# should be linked to in this documentation.
#
# Relative local paths for target locations are taken as relative to the base
# of the built documentation, while relative local paths for inventory
# locations are taken as relative to the source directory.
#
# When fetching remote inventory files, proxy settings will be read from the
# $HTTP_PROXY environment variable.
intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}
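# Hedged sketch (assumption, not part of this project's config): further
# inventories could be linked the same way, e.g.
#   intersphinx_mapping['requests'] = ('https://requests.readthedocs.io/en/latest/', None)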
graphviz_output_format = "svg"
plantuml_output_format = "svg"
plantuml_latex_output_format = "pdf"
_plantuml_node = sphinxcontrib.plantuml.plantuml
def preserve_original_messages(self) -> None:
"""Preserve original translatable messages."""
self.original_uml = self['uml']
self.original_caption = self.get('caption', None)
def apply_translated_message(self, original_message: str, translated_message: str) -> None:
"""Apply translated message."""
if self.original_uml == original_message:
self['uml'] = translated_message
elif self.original_caption == original_message:
self['caption'] = translated_message
def extract_original_messages(self) -> Sequence[str]:
"""Extract translation messages.
:returns: list of extracted messages or messages generator
"""
messages = (self['uml'],)
if 'caption' in self:
    messages += (self['caption'],)
return messages
sphinxcontrib.plantuml.plantuml.__bases__ = (addnodes.translatable,) + sphinxcontrib.plantuml.plantuml.__bases__
sphinxcontrib.plantuml.plantuml.preserve_original_messages = preserve_original_messages
sphinxcontrib.plantuml.plantuml.apply_translated_message = apply_translated_message
sphinxcontrib.plantuml.plantuml.extract_original_messages = extract_original_messages
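# Illustrative sketch (assumption, not from this project's docs): with the
# patch above, both the body and the caption of a ``uml`` directive become
# translatable messages, e.g. for a document containing:
#
#   .. uml::
#      :caption: Example diagram (hypothetical)
#
#      Alice -> Bob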
def run(self):
from sphinx.util.nodes import set_source_info
from sphinx.util.i18n import search_image_for_language
warning = self.state.document.reporter.warning
env = self.state.document.settings.env
if self.arguments and self.content:
return [warning('uml directive cannot have both content and '
'a filename argument', line=self.lineno)]
if self.arguments:
fn = search_image_for_language(self.arguments[0], env)
relfn, absfn = env.relfn2path(fn)
env.note_dependency(relfn)
try:
umlcode = sphinxcontrib.plantuml._read_utf8(absfn)
except (IOError, UnicodeDecodeError) as err:
return [warning('PlantUML file "%s" cannot be read: %s'
% (fn, err), line=self.lineno)]
source = absfn
line = 1
else:
relfn = env.doc2path(env.docname, base=None)
umlcode = '\n'.join(self.content)
source, line = self.state_machine.get_source_and_line(self.content_offset)
node = sphinxcontrib.plantuml.plantuml(self.block_text, **self.options)
node['uml'] = umlcode
node['incdir'] = os.path.dirname(relfn)
node['filename'] = os.path.split(relfn)[1]
node.source, node.line = source, line
# XXX maybe this should be moved to _visit_plantuml functions. it
# seems wrong to insert "figure" node by "plantuml" directive.
if 'caption' in self.options or 'align' in self.options:
node = nodes.figure('', node)
if 'align' in self.options:
node['align'] = self.options['align']
if 'caption' in self.options:
inodes, messages = self.state.inline_text(self.options['caption'],
self.lineno)
caption_node = nodes.caption(self.options['caption'], '', *inodes)
caption_node.extend(messages)
set_source_info(self, caption_node)
node += caption_node
self.add_name(node)
if 'html_format' in self.options:
node['html_format'] = self.options['html_format']
if 'latex_format' in self.options:
node['latex_format'] = self.options['latex_format']
return [node]
def html_page_context(self, pagename, templatename, context, doctree):
# Workaround to only add extra catalog after .mo files are built.
# This would happen on readthedocs server as .mo files are only built
# during compile time. Adding extra catalog to the theme only works
# after .mo is built.
if not self.catalog_added:
package_dir = path.abspath(path.dirname(__file__))
locale_dir = os.path.join(package_dir, 'locale')
self.add_message_catalog(MESSAGE_CATALOG_NAME, locale_dir)
self.add_message_catalog("sphinx", locale_dir)
self.catalog_added = True
if context.get("language"):
context["language"] = context["language"].replace("_", "-")
sphinxcontrib.plantuml.UmlDirective.run = run
def setup(self):
self.catalog_added = False
self.connect("html-page-context", html_page_context)
self.config.language = conversion.get(self.config.language, self.config.language)
self.config.overrides['language'] = conversion.get(self.config.overrides.get('language', None),
self.config.overrides.get('language', None))
if self.config.language and self.config.language.startswith("zh"):
from pprint import pprint
# pprint(self.config.__getstate__())
latex_elements = globals().get("latex_elements", None)
pprint(latex_elements)
latex_elements['preamble'] += r"""
\usepackage[AutoFallBack=true]{xeCJK}
\setCJKmainfont{Noto Serif CJK SC}[Language=Chinese Simplified, BoldFont={* Bold}, ItalicFont=AR PL KaitiM GB]
\setCJKsansfont{Noto Sans CJK SC}[Language=Chinese Simplified, BoldFont={* Bold}, ItalicFont=AR PL KaitiM GB]
\setCJKmonofont{Noto Sans CJK SC}[Language=Chinese Simplified, BoldFont={* Bold}, ItalicFont=AR PL KaitiM GB]
\setCJKfallbackfamilyfont{\CJKrmdefault}[AutoFakeBold]{{HanaMinA},{HanaMinB}}
\setCJKfallbackfamilyfont{\CJKsfdefault}[AutoFakeBold]{{HanaMinA},{HanaMinB}}
\setCJKfallbackfamilyfont{\CJKttdefault}[AutoFakeBold]{{HanaMinA},{HanaMinB}}
"""
smartchars.quotes['zh-cn'] = smartchars.quotes['zh-tw']
|
tsdmgz/ansible
|
refs/heads/devel
|
lib/ansible/plugins/connection/__init__.py
|
7
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
# (c) 2017, Peter Sprygada <psprygad@redhat.com>
# (c) 2017 Ansible Project
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fcntl
import gettext
import os
import shlex
from abc import abstractmethod, abstractproperty
from functools import wraps
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_bytes, to_text
from ansible.plugins import AnsiblePlugin
from ansible.plugins.loader import shell_loader
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['ConnectionBase', 'ensure_connect']
BUFSIZE = 65536
def ensure_connect(func):
@wraps(func)
def wrapped(self, *args, **kwargs):
if not self._connected:
self._connect()
return func(self, *args, **kwargs)
return wrapped
class ConnectionBase(AnsiblePlugin):
'''
A base class for connections to contain common code.
'''
has_pipelining = False
has_native_async = False # eg, winrm
always_pipeline_modules = False # eg, winrm
become_methods = C.BECOME_METHODS
# When running over this connection type, prefer modules written in a certain language
# as discovered by the specified file extension. An empty string as the
# language means any language.
module_implementation_preferences = ('',)
allow_executable = True
# the following control whether the connection supports the
# persistent connection framework
supports_persistence = False
force_persistence = False
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(ConnectionBase, self).__init__()
# All these hasattrs allow subclasses to override these parameters
if not hasattr(self, '_play_context'):
self._play_context = play_context
if not hasattr(self, '_new_stdin'):
self._new_stdin = new_stdin
# Backwards compat: self._display isn't really needed, just import the global display and use that.
if not hasattr(self, '_display'):
self._display = display
if not hasattr(self, '_connected'):
self._connected = False
self.success_key = None
self.prompt = None
self._connected = False
self._socket_path = None
# load the shell plugin for this action/connection
if play_context.shell:
shell_type = play_context.shell
elif hasattr(self, '_shell_type'):
shell_type = getattr(self, '_shell_type')
else:
shell_type = 'sh'
shell_filename = os.path.basename(self._play_context.executable)
for shell in shell_loader.all():
if shell_filename in shell.COMPATIBLE_SHELLS:
shell_type = shell.SHELL_FAMILY
break
self._shell = shell_loader.get(shell_type)
if not self._shell:
raise AnsibleError("Invalid shell type specified (%s), or the plugin for that shell type is missing." % shell_type)
@property
def connected(self):
'''Read-only property holding whether the connection to the remote host is active or closed.'''
return self._connected
@property
def socket_path(self):
'''Read-only property holding the connection socket path for this remote host'''
return self._socket_path
def _become_method_supported(self):
''' Checks if the current class supports this privilege escalation method '''
if self._play_context.become_method in self.become_methods:
return True
raise AnsibleError("Internal Error: this connection module does not support running commands via %s" % self._play_context.become_method)
@staticmethod
def _split_ssh_args(argstring):
"""
Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a
list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to
the argument list. The list will not contain any empty elements.
"""
try:
# Python 2.6.x shlex doesn't handle unicode type so we have to
# convert args to byte string for that case. More efficient to
# try without conversion first but python2.6 doesn't throw an
# exception, it merely mangles the output:
# >>> shlex.split(u't e')
# ['t\x00\x00\x00', '\x00\x00\x00e\x00\x00\x00']
return [to_text(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()]
except AttributeError:
# In Python3, shlex.split doesn't work on a byte string.
return [to_text(x.strip()) for x in shlex.split(argstring) if x.strip()]
@abstractproperty
def transport(self):
"""String used to identify this Connection class from other classes"""
pass
@abstractmethod
def _connect(self):
"""Connect to the host we've been initialized with"""
# Check if PE is supported
if self._play_context.become:
self._become_method_supported()
@ensure_connect
@abstractmethod
def exec_command(self, cmd, in_data=None, sudoable=True):
"""Run a command on the remote host.
:arg cmd: byte string containing the command
:kwarg in_data: If set, this data is passed to the command's stdin.
This is used to implement pipelining. Currently not all
connection plugins implement pipelining.
:kwarg sudoable: Tell the connection plugin if we're executing
a command via a privilege escalation mechanism. This may affect
how the connection plugin returns data. Note that not all
connections can handle privilege escalation.
:returns: a tuple of (return code, stdout, stderr) The return code is
an int while stdout and stderr are both byte strings.
When a command is executed, it goes through multiple commands to get
there. It looks approximately like this::
[LocalShell] ConnectionCommand [UsersLoginShell (*)] ANSIBLE_SHELL_EXECUTABLE [(BecomeCommand ANSIBLE_SHELL_EXECUTABLE)] Command
:LocalShell: Is optional. It is run locally to invoke the
``Connection Command``. In most instances, the
``ConnectionCommand`` can be invoked directly instead. The sole
known exception is the ssh connection plugin, which can have
values specified via ssh_args that need expanding locally.
Shell metacharacters in the command itself should be
processed on the remote machine, not on the local machine so no
shell is needed on the local machine. (Example, ``/bin/sh``)
:ConnectionCommand: This is the command that connects us to the remote
machine to run the rest of the command. ``ansible_ssh_user``,
``ansible_ssh_host`` and so forth are fed to this piece of the
command to connect to the correct host (Examples ``ssh``,
``chroot``)
:UsersLoginShell: This shell may or may not be created depending on
the ConnectionCommand used by the connection plugin. This is the
shell that the ``ansible_ssh_user`` has configured as their login
shell. In traditional UNIX parlance, this is the last field of
a user's ``/etc/passwd`` entry. We do not specifically try to run
the ``UsersLoginShell`` when we connect. Instead it is implicit
in the actions that the ``ConnectionCommand`` takes when it
connects to a remote machine. ``ansible_shell_type`` may be set
to inform ansible of differences in how the ``UsersLoginShell``
handles things like quoting if a shell has different semantics
than the Bourne shell.
:ANSIBLE_SHELL_EXECUTABLE: This is the shell set via the inventory var
``ansible_shell_executable`` or via
``constants.DEFAULT_EXECUTABLE`` if the inventory var is not set.
We explicitly invoke this shell so that we have predictable
quoting rules at this point. ``ANSIBLE_SHELL_EXECUTABLE`` is only
settable by the user because some sudo setups may only allow
invoking a specific shell. (For instance, ``/bin/bash`` may be
allowed but ``/bin/sh``, our default, may not). We invoke this
twice, once after the ``ConnectionCommand`` and once after the
``BecomeCommand``. After the ConnectionCommand, this is run by
the ``UsersLoginShell``. After the ``BecomeCommand`` we specify
that the ``ANSIBLE_SHELL_EXECUTABLE`` is being invoked directly.
:BecomeCommand ANSIBLE_SHELL_EXECUTABLE: Is the command that performs
privilege escalation. Setting this up is performed by the action
plugin prior to running ``exec_command``. So we just get passed
:param:`cmd` which has the BecomeCommand already added.
(Examples: sudo, su) If we have a BecomeCommand then we will
invoke an ANSIBLE_SHELL_EXECUTABLE shell inside of it so that we
have a consistent view of quoting.
:Command: Is the command we're actually trying to run remotely.
(Examples: mkdir -p $HOME/.ansible, python $HOME/.ansible/tmp-script-file)
"""
pass
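    # Hedged illustration (not part of the API): for an ssh connection with
    # sudo become, the layered command documented above might expand roughly
    # like this hypothetical shape:
    #
    #   ssh <host> -- /bin/sh -c 'sudo -H -S -n -u root /bin/sh -c "<cmd>"'
    #
    # where /bin/sh stands in for ANSIBLE_SHELL_EXECUTABLE at both layers.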
@ensure_connect
@abstractmethod
def put_file(self, in_path, out_path):
"""Transfer a file from local to remote"""
pass
@ensure_connect
@abstractmethod
def fetch_file(self, in_path, out_path):
"""Fetch a file from remote to local"""
pass
@abstractmethod
def close(self):
"""Terminate the connection"""
pass
def check_become_success(self, b_output):
b_success_key = to_bytes(self._play_context.success_key)
for b_line in b_output.splitlines(True):
if b_success_key == b_line.rstrip():
return True
return False
def check_password_prompt(self, b_output):
if self._play_context.prompt is None:
return False
elif isinstance(self._play_context.prompt, string_types):
b_prompt = to_bytes(self._play_context.prompt).strip()
b_lines = b_output.splitlines()
return any(l.strip().startswith(b_prompt) for l in b_lines)
else:
return self._play_context.prompt(b_output)
def check_incorrect_password(self, b_output):
b_incorrect_password = to_bytes(gettext.dgettext(self._play_context.become_method, C.BECOME_ERROR_STRINGS[self._play_context.become_method]))
return b_incorrect_password and b_incorrect_password in b_output
def check_missing_password(self, b_output):
b_missing_password = to_bytes(gettext.dgettext(self._play_context.become_method, C.BECOME_MISSING_STRINGS[self._play_context.become_method]))
return b_missing_password and b_missing_password in b_output
def connection_lock(self):
f = self._play_context.connection_lockfd
display.vvvv('CONNECTION: pid %d waiting for lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr)
fcntl.lockf(f, fcntl.LOCK_EX)
display.vvvv('CONNECTION: pid %d acquired lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr)
def connection_unlock(self):
f = self._play_context.connection_lockfd
fcntl.lockf(f, fcntl.LOCK_UN)
display.vvvv('CONNECTION: pid %d released lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr)
def reset(self):
display.warning("Reset is not implemented for this connection")
|
cybergarage/round-py
|
refs/heads/master
|
tests/unit/method_test.py
|
1
|
#################################################################
#
# Round for Python
#
# Copyright (C) Satoshi Konno 2016
#
# This is licensed under BSD-style license, see file COPYING.
#
##################################################################
import pytest
from round import Method
def test_bad_method():
method = Method()
assert not method.is_valid()
|
damngamerz/coala
|
refs/heads/master
|
tests/collecting/collectors_test_dir/bears/bear1.py
|
35
|
import multiprocessing
from coalib.bears.Bear import Bear
from coalib.settings.Section import Section
class TestBear(Bear):
def __init__(self):
Bear.__init__(self, Section('settings'), multiprocessing.Queue())
@staticmethod
def kind():
return 'kind'
def origin(self):
return __file__
class NoKind():
def __init__(self):
pass
@staticmethod
def kind():
raise NotImplementedError
|
leotada/PyNFe
|
refs/heads/master
|
pynfe/entidades/transportadora.py
|
2
|
# -*- coding: utf-8 -*-
from .base import Entidade
from pynfe.utils.flags import TIPOS_DOCUMENTO
class Transportadora(Entidade):
# Dados da Transportadora
# - Nome/Razão Social (obrigatorio)
razao_social = str()
# - Tipo de Documento (obrigatorio) - default CNPJ
tipo_documento = 'CNPJ'
# - Numero do Documento (obrigatorio)
numero_documento = str()
# - Inscricao Estadual
inscricao_estadual = str()
# Endereco
# - Logradouro (obrigatorio)
endereco_logradouro = str()
# - UF (obrigatorio)
endereco_uf = str()
# - Municipio (obrigatorio)
endereco_municipio = str()
def __str__(self):
return ' '.join([self.tipo_documento, self.numero_documento])
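# Hedged usage sketch (hypothetical values):
#   t = Transportadora()
#   t.razao_social = 'Transportes Exemplo Ltda'
#   t.numero_documento = '12345678000199'
#   str(t)  # -> 'CNPJ 12345678000199'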
|
avelis/django-plantains
|
refs/heads/master
|
plantains/__init__.py
|
1
|
# -*- coding: utf-8 -*-
from django.conf import settings
default_settings = {
'MAILCHIMP_CLIENT_ID': None,
'MAILCHIMP_CLIENT_SECRET': None,
'MAILCHIMP_AUTHORIZATION_URL': 'https://login.mailchimp.com/oauth2/authorize',
'MAILCHIMP_ACCESS_TOKEN_URL': 'https://login.mailchimp.com/oauth2/token',
'MAILCHIMP_METADATA_URL': 'https://login.mailchimp.com/oauth2/metadata',
'MAILCHIMP_SUCCESS_REDIRECT_URL': '/',
}
class Settings(object):
def __init__(self, app_settings, defaults):
for k, v in defaults.items():  # items() works on both Python 2 and 3
setattr(self, k, getattr(app_settings, k, v))
settings = Settings(settings, default_settings)
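# Hedged usage sketch (assumption): after this module is imported, other code
# reads the merged values, e.g.
#   from plantains import settings
#   settings.MAILCHIMP_AUTHORIZATION_URL  # Django override, or the default above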
|
kalahbrown/HueBigSQL
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/tests/or_lookups/tests.py
|
150
|
from __future__ import absolute_import
from datetime import datetime
from operator import attrgetter
from django.db.models import Q
from django.test import TestCase
from .models import Article
class OrLookupsTests(TestCase):
def setUp(self):
self.a1 = Article.objects.create(
headline='Hello', pub_date=datetime(2005, 11, 27)
).pk
self.a2 = Article.objects.create(
headline='Goodbye', pub_date=datetime(2005, 11, 28)
).pk
self.a3 = Article.objects.create(
headline='Hello and goodbye', pub_date=datetime(2005, 11, 29)
).pk
def test_filter_or(self):
self.assertQuerysetEqual(
Article.objects.filter(headline__startswith='Hello') | Article.objects.filter(headline__startswith='Goodbye'), [
'Hello',
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.filter(headline__contains='Hello') | Article.objects.filter(headline__contains='bye'), [
'Hello',
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iexact='Hello') | Article.objects.filter(headline__contains='ood'), [
'Hello',
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.filter(Q(headline__startswith='Hello') | Q(headline__startswith='Goodbye')), [
'Hello',
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline")
)
def test_stages(self):
# You can shorten this syntax with code like the following, which is
# especially useful if building the query in stages:
articles = Article.objects.all()
self.assertQuerysetEqual(
articles.filter(headline__startswith='Hello') & articles.filter(headline__startswith='Goodbye'),
[]
)
self.assertQuerysetEqual(
articles.filter(headline__startswith='Hello') & articles.filter(headline__contains='bye'), [
'Hello and goodbye'
],
attrgetter("headline")
)
def test_pk_q(self):
self.assertQuerysetEqual(
Article.objects.filter(Q(pk=self.a1) | Q(pk=self.a2)), [
'Hello',
'Goodbye'
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.filter(Q(pk=self.a1) | Q(pk=self.a2) | Q(pk=self.a3)), [
'Hello',
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline"),
)
def test_pk_in(self):
self.assertQuerysetEqual(
Article.objects.filter(pk__in=[self.a1, self.a2, self.a3]), [
'Hello',
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline"),
)
self.assertQuerysetEqual(
Article.objects.filter(pk__in=(self.a1, self.a2, self.a3)), [
'Hello',
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline"),
)
self.assertQuerysetEqual(
Article.objects.filter(pk__in=[self.a1, self.a2, self.a3, 40000]), [
'Hello',
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline"),
)
def test_q_negated(self):
# Q objects can be negated
self.assertQuerysetEqual(
Article.objects.filter(Q(pk=self.a1) | ~Q(pk=self.a2)), [
'Hello',
'Hello and goodbye'
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.filter(~Q(pk=self.a1) & ~Q(pk=self.a2)), [
'Hello and goodbye'
],
attrgetter("headline"),
)
# This allows for more complex queries than filter() and exclude()
# alone would allow
self.assertQuerysetEqual(
Article.objects.filter(Q(pk=self.a1) & (~Q(pk=self.a2) | Q(pk=self.a3))), [
'Hello'
],
attrgetter("headline"),
)
def test_complex_filter(self):
# The 'complex_filter' method supports framework features such as
# 'limit_choices_to' which normally take a single dictionary of lookup
# arguments but need to support arbitrary queries via Q objects too.
self.assertQuerysetEqual(
Article.objects.complex_filter({'pk': self.a1}), [
'Hello'
],
attrgetter("headline"),
)
self.assertQuerysetEqual(
Article.objects.complex_filter(Q(pk=self.a1) | Q(pk=self.a2)), [
'Hello',
'Goodbye'
],
attrgetter("headline"),
)
def test_empty_in(self):
# Passing "in" an empty list returns no results ...
self.assertQuerysetEqual(
Article.objects.filter(pk__in=[]),
[]
)
# ... but can return results if we OR it with another query.
self.assertQuerysetEqual(
Article.objects.filter(Q(pk__in=[]) | Q(headline__icontains='goodbye')), [
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline"),
)
def test_q_and(self):
# Q arg objects are ANDed
self.assertQuerysetEqual(
Article.objects.filter(Q(headline__startswith='Hello'), Q(headline__contains='bye')), [
'Hello and goodbye'
],
attrgetter("headline")
)
# Q arg AND order is irrelevant
self.assertQuerysetEqual(
Article.objects.filter(Q(headline__contains='bye'), headline__startswith='Hello'), [
'Hello and goodbye'
],
attrgetter("headline"),
)
self.assertQuerysetEqual(
Article.objects.filter(Q(headline__startswith='Hello') & Q(headline__startswith='Goodbye')),
[]
)
def test_q_exclude(self):
self.assertQuerysetEqual(
Article.objects.exclude(Q(headline__startswith='Hello')), [
'Goodbye'
],
attrgetter("headline")
)
def test_other_arg_queries(self):
# Try some arg queries with operations other than filter.
self.assertEqual(
Article.objects.get(Q(headline__startswith='Hello'), Q(headline__contains='bye')).headline,
'Hello and goodbye'
)
self.assertEqual(
Article.objects.filter(Q(headline__startswith='Hello') | Q(headline__contains='bye')).count(),
3
)
self.assertQuerysetEqual(
Article.objects.filter(Q(headline__startswith='Hello'), Q(headline__contains='bye')).values(), [
{"headline": "Hello and goodbye", "id": self.a3, "pub_date": datetime(2005, 11, 29)},
],
lambda o: o,
)
self.assertEqual(
Article.objects.filter(Q(headline__startswith='Hello')).in_bulk([self.a1, self.a2]),
{self.a1: Article.objects.get(pk=self.a1)}
)
|
srkukarni/heron
|
refs/heads/master
|
third_party/pex/setup.py
|
11
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'CHANGES.rst')) as fp:
LONG_DESCRIPTION = fp.read()
# This seems to be a fairly standard version file pattern.
#
# Populates the following variables:
# __version__
#   SETUPTOOLS_REQUIREMENT
#   WHEEL_REQUIREMENT
__version__ = ''
version_py_file = os.path.join(os.path.dirname(__file__), 'pex', 'version.py')
with open(version_py_file) as version_py:
exec(compile(version_py.read(), version_py_file, 'exec'))
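# Illustrative sketch (assumption): pex/version.py is expected to define
# values along the lines of:
#   __version__ = '0.8.5'
#   SETUPTOOLS_REQUIREMENT = 'setuptools>=2.2,<9'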
setup(
name = 'pex',
version = __version__,
description = "The PEX packaging toolchain.",
long_description = LONG_DESCRIPTION,
url = 'https://github.com/pantsbuild/pex',
license = 'Apache License, Version 2.0',
zip_safe = True,
classifiers = [
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
packages = [
'pex',
'pex.bin',
],
install_requires = [
SETUPTOOLS_REQUIREMENT,
],
tests_require = [
'mock',
'twitter.common.contextutil>=0.3.1,<0.4.0',
'twitter.common.lang>=0.3.1,<0.4.0',
'twitter.common.testing>=0.3.1,<0.4.0',
'twitter.common.dirutil>=0.3.1,<0.4.0',
'pytest',
],
entry_points = {
'console_scripts': [
'pex = pex.bin.pex:main',
],
},
)
|
mheap/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/nxos/nxos_vrf_af.py
|
53
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_vrf_af
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages VRF AF.
description:
- Manages VRF AF
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Default, where supported, restores the parameter's default value.
options:
vrf:
description:
- Name of the VRF.
required: true
afi:
description:
- Address-Family Identifier (AFI).
required: true
choices: ['ipv4', 'ipv6']
safi:
description:
- Sub Address-Family Identifier (SAFI).
- Deprecated in 2.4
required: true
choices: ['unicast', 'multicast']
route_target_both_auto_evpn:
description:
- Enable/Disable the EVPN route-target 'auto' setting for both
import and export target communities.
type: bool
state:
description:
- Determines whether the config should be present or
not on the device.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_vrf_af:
vrf: ntc
afi: ipv4
route_target_both_auto_evpn: True
state: present
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["vrf context ntc", "address-family ipv4 unicast"]
'''
from ansible.module_utils.network.nxos.nxos import get_config, load_config
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import NetworkConfig
def main():
argument_spec = dict(
vrf=dict(required=True),
afi=dict(required=True, choices=['ipv4', 'ipv6']),
route_target_both_auto_evpn=dict(required=False, type='bool'),
state=dict(choices=['present', 'absent'], default='present'),
safi=dict(choices=['unicast', 'multicast'], removed_in_version="2.4"),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False, 'warnings': warnings}
config_text = get_config(module)
config = NetworkConfig(indent=2, contents=config_text)
path = ['vrf context %s' % module.params['vrf'],
'address-family %s unicast' % module.params['afi']]
try:
current = config.get_block_config(path)
except ValueError:
current = None
commands = list()
if current and module.params['state'] == 'absent':
commands.append('no address-family %s unicast' % module.params['afi'])
elif module.params['state'] == 'present':
if current:
have = 'route-target both auto evpn' in current
if module.params['route_target_both_auto_evpn'] is not None:
want = bool(module.params['route_target_both_auto_evpn'])
if want and not have:
commands.append('address-family %s unicast' % module.params['afi'])
commands.append('route-target both auto evpn')
elif have and not want:
commands.append('address-family %s unicast' % module.params['afi'])
commands.append('no route-target both auto evpn')
else:
commands.append('address-family %s unicast' % module.params['afi'])
if module.params['route_target_both_auto_evpn']:
commands.append('route-target both auto evpn')
if commands:
commands.insert(0, 'vrf context %s' % module.params['vrf'])
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
result['commands'] = commands
module.exit_json(**result)
if __name__ == '__main__':
main()
|
ntuaha/NewsInsight
|
refs/heads/master
|
src/extract/MYDB.py
|
1
|
# -*- coding: utf-8 -*-
import re
# work around unicode vs str issues on ascii (default encoding is set below)
import sys
import os
import psycopg2
import datetime
#import calendar
#import csv
#import math
#from time import mktime as mktime
import cookielib, urllib2, urllib
from lxml import html,etree
import StringIO
reload(sys)
sys.setdefaultencoding('utf8')
class MYDB:
database=""
user=""
password=""
host=""
port=""
conn = None
cur = None
def __init__(self,filepath):
f = open(filepath,'r')
self.database = f.readline()[:-1]
self.user = f.readline()[:-1]
self.password = f.readline()[:-1]
self.host = f.readline()[:-1]
self.port =f.readline()[:-1]
f.close()
self.startDB()
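# Illustrative credentials file layout read by __init__ above (hypothetical
# values), one value per line: database, user, password, host, port:
#   newsdb
#   dbuser
#   secret
#   localhost
#   5432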
# open the DB connection
def startDB(self):
self.conn = psycopg2.connect(database=self.database, user=self.user, password=self.password, host=self.host, port=self.port)
self.cur = self.conn.cursor()
# close the DB connection
def endDB(self):
self.cur.close()
self.conn.close()
def sendtoFB(self,year,month):
sql = "SELECT sum(newscount),sum(extract(epoch from process_time))/sum(newscount) from crawler_record where date_trunc('month',data_dt)='%04d-%02d-01' and source='%s';"%(year,month,self.table)
self.cur.execute(sql)
(num,speed) = self.cur.fetchall()[0]
Title = "完成"
Result = "成功"
print num
print speed
Detail = "%s 新聞資料獲得,得到%d筆,每筆約花%.02f秒"%("%04d年%02d月"%(year,month),num,speed)
api = '688430041191592'
api_secret = '6bb097ca9fe10f1bca0c1c320232eba2'
callback_website = 'https://github.com/ntuaha/TWFS/'
picture_url_tick = 'http://www.iconarchive.com/icons/pixelmixer/basic/64/tick-icon.png'
facebook_id = '100000185149998'
cmd = os.popen("/usr/bin/curl -F grant_type=client_credentials -F client_id=%s -F client_secret=%s -k https://graph.facebook.com/oauth/access_token"%(api,api_secret))
k = cmd.read()
access_token = k.split("=")[1]
work = "/usr/bin/curl -F 'access_token=%s' -F 'message=%s' -F 'name=%s' -F 'picture=%s' -F 'caption=%s' -k https://graph.facebook.com/%s/feed"%(access_token,Detail,Title,picture_url_tick,Result,facebook_id)
#print work
cmd = os.popen(work)
|
datakurre/xhtml2pdf
|
refs/heads/master
|
demo/tgpisa/tgpisa/release.py
|
169
|
# Release information about tgpisa
version = "1.0"
# description = "Your plan to rule the world"
# long_description = "More description about your plan"
# author = "Your Name Here"
# email = "YourEmail@YourDomain"
# copyright = "Vintage 2006 - a good year indeed"
# if it's open source, you might want to specify these
# url = "http://yourcool.site/"
# download_url = "http://yourcool.site/download"
# license = "MIT"
|
soindy/rabbitmq-c
|
refs/heads/master
|
.ycm_extra_conf.py
|
9
|
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=gnu90',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c',
'-I', './librabbitmq',
'-D', 'HAVE_POLL',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
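# Sketch of the behaviour with hypothetical paths:
#   MakeRelativePathsInFlagsAbsolute( [ '-I', 'include' ], '/src/project' )
#   -> [ '-I', '/src/project/include' ]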
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
relative_to = DirectoryOfThisScript()
return {
'flags': MakeRelativePathsInFlagsAbsolute( flags, relative_to ),
'do_cache': True
}
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
|
maxamillion/ansible
|
refs/heads/devel
|
test/lib/ansible_test/_internal/init.py
|
68
|
"""Early initialization for ansible-test before most other imports have been performed."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import resource
from .constants import (
SOFT_RLIMIT_NOFILE,
)
CURRENT_RLIMIT_NOFILE = resource.getrlimit(resource.RLIMIT_NOFILE)
DESIRED_RLIMIT_NOFILE = (SOFT_RLIMIT_NOFILE, CURRENT_RLIMIT_NOFILE[1])
if DESIRED_RLIMIT_NOFILE < CURRENT_RLIMIT_NOFILE:
resource.setrlimit(resource.RLIMIT_NOFILE, DESIRED_RLIMIT_NOFILE)
CURRENT_RLIMIT_NOFILE = DESIRED_RLIMIT_NOFILE
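# Illustrative behaviour (assumed typical values): with a current limit of
# (1048576, 1048576) and SOFT_RLIMIT_NOFILE == 10240, the tuple comparison
# above is true and the soft limit is lowered to 10240.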
|
domob1812/bitcoin
|
refs/heads/master
|
contrib/linearize/linearize-data.py
|
37
|
#!/usr/bin/env python3
#
# linearize-data.py: Construct a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import struct
import re
import os
import os.path
import sys
import hashlib
import datetime
import time
import glob
from collections import namedtuple
from binascii import unhexlify
settings = {}
def hex_switchEndian(s):
""" Switches the endianness of a hex string (in pairs of hex chars) """
pairList = [s[i:i+2].encode() for i in range(0, len(s), 2)]
return b''.join(pairList[::-1]).decode()
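# For example: hex_switchEndian('f9beb4d9') -> 'd9b4bef9'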
def uint32(x):
return x & 0xffffffff
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return b''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return b''.join(out_words)
def calc_hdr_hash(blk_hdr):
hash1 = hashlib.sha256()
hash1.update(blk_hdr)
hash1_o = hash1.digest()
hash2 = hashlib.sha256()
hash2.update(hash1_o)
hash2_o = hash2.digest()
return hash2_o
def calc_hash_str(blk_hdr):
hash = calc_hdr_hash(blk_hdr)
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.hex()
return hash_str
def get_blk_dt(blk_hdr):
members = struct.unpack("<I", blk_hdr[68:68+4])
nTime = members[0]
dt = datetime.datetime.fromtimestamp(nTime)
dt_ym = datetime.datetime(dt.year, dt.month, 1)
return (dt_ym, nTime)
# When getting the list of block hashes, undo any byte reversals.
def get_block_hashes(settings):
blkindex = []
with open(settings['hashlist'], "r", encoding="utf8") as f:
    for line in f:
        line = line.rstrip()
        if settings['rev_hash_bytes'] == 'true':
            line = hex_switchEndian(line)
        blkindex.append(line)
print("Read " + str(len(blkindex)) + " hashes")
return blkindex
# The block map shouldn't give or receive byte-reversed hashes.
def mkblockmap(blkindex):
blkmap = {}
for height,hash in enumerate(blkindex):
blkmap[hash] = height
return blkmap
# This gets the first block file ID that exists from the input block
# file directory.
def getFirstBlockFileId(block_dir_path):
# First, this sets up a pattern to search for block files, for
# example 'blkNNNNN.dat'.
blkFilePattern = os.path.join(block_dir_path, "blk[0-9][0-9][0-9][0-9][0-9].dat")
# This search is done with glob
blkFnList = glob.glob(blkFilePattern)
if len(blkFnList) == 0:
print("blocks not pruned - starting at 0")
return 0
# We then get the lexicographic minimum, which should be the first
# block file name.
firstBlkFilePath = min(blkFnList)
firstBlkFn = os.path.basename(firstBlkFilePath)
# now, the string should be ['b','l','k','N','N','N','N','N','.','d','a','t']
# So get the ID by choosing: 3 4 5 6 7
# The ID is not necessarily 0 if this is a pruned node.
blkId = int(firstBlkFn[3:8])
return blkId
# Block header and extent on disk
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
def __init__(self, settings, blkindex, blkmap):
self.settings = settings
self.blkindex = blkindex
self.blkmap = blkmap
# Get first occurring block file id - for pruned nodes this
# will not necessarily be 0
self.inFn = getFirstBlockFileId(self.settings['input'])
self.inF = None
self.outFn = 0
self.outsz = 0
self.outF = None
self.outFname = None
self.blkCountIn = 0
self.blkCountOut = 0
self.lastDate = datetime.datetime(2000, 1, 1)
self.highTS = 1408893517 - 315360000
self.timestampSplit = False
self.fileOutput = True
self.setFileTime = False
self.maxOutSz = settings['max_out_sz']
if 'output' in settings:
self.fileOutput = False
if settings['file_timestamp'] != 0:
self.setFileTime = True
if settings['split_timestamp'] != 0:
self.timestampSplit = True
# Extents and cache for out-of-order blocks
self.blockExtents = {}
self.outOfOrderData = {}
self.outOfOrderSize = 0 # running total size for items in outOfOrderData
def writeBlock(self, inhdr, blk_hdr, rawblock):
blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
self.outF.close()
if self.setFileTime:
os.utime(self.outFname, (int(time.time()), self.highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
(blkDate, blkTS) = get_blk_dt(blk_hdr)
if self.timestampSplit and (blkDate > self.lastDate):
print("New month " + blkDate.strftime("%Y-%m") + " @ " + self.hash_str)
self.lastDate = blkDate
if self.outF:
self.outF.close()
if self.setFileTime:
os.utime(self.outFname, (int(time.time()), self.highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
if not self.outF:
if self.fileOutput:
self.outFname = self.settings['output_file']
else:
self.outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
print("Output file " + self.outFname)
self.outF = open(self.outFname, "wb")
self.outF.write(inhdr)
self.outF.write(blk_hdr)
self.outF.write(rawblock)
self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)
self.blkCountOut = self.blkCountOut + 1
if blkTS > self.highTS:
self.highTS = blkTS
if (self.blkCountOut % 1000) == 0:
print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
(self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))
def inFileName(self, fn):
return os.path.join(self.settings['input'], "blk%05d.dat" % fn)
def fetchBlock(self, extent):
'''Fetch block contents from disk given extents'''
with open(self.inFileName(extent.fn), "rb") as f:
f.seek(extent.offset)
return f.read(extent.size)
def copyOneBlock(self):
'''Find the next block to be written in the input, and copy it to the output.'''
extent = self.blockExtents.pop(self.blkCountOut)
if self.blkCountOut in self.outOfOrderData:
# If the data is cached, use it from memory and remove from the cache
rawblock = self.outOfOrderData.pop(self.blkCountOut)
self.outOfOrderSize -= len(rawblock)
else: # Otherwise look up data on disk
rawblock = self.fetchBlock(extent)
self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)
def run(self):
while self.blkCountOut < len(self.blkindex):
if not self.inF:
fname = self.inFileName(self.inFn)
print("Input file " + fname)
try:
self.inF = open(fname, "rb")
except IOError:
print("Premature end of block data")
return
inhdr = self.inF.read(8)
if (not inhdr or (inhdr[0:1] == b"\0")):  # inhdr is bytes; compare against a bytes literal
self.inF.close()
self.inF = None
self.inFn = self.inFn + 1
continue
inMagic = inhdr[:4]
if (inMagic != self.settings['netmagic']):
# Seek backwards 7 bytes (skipping the first byte in the previous search)
# and continue searching from the new position if the magic bytes are not
# found.
self.inF.seek(-7, os.SEEK_CUR)
continue
inLenLE = inhdr[4:]
su = struct.unpack("<I", inLenLE)
inLen = su[0] - 80 # length without header
blk_hdr = self.inF.read(80)
inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)
self.hash_str = calc_hash_str(blk_hdr)
if self.hash_str not in self.blkmap:
# Because blocks can be written to files out-of-order as of 0.10, the script
# may encounter blocks it doesn't know about. Treat as debug output.
if self.settings['debug_output'] == 'true':
print("Skipping unknown block " + self.hash_str)
self.inF.seek(inLen, os.SEEK_CUR)
continue
blkHeight = self.blkmap[self.hash_str]
self.blkCountIn += 1
if self.blkCountOut == blkHeight:
# If in-order block, just copy
rawblock = self.inF.read(inLen)
self.writeBlock(inhdr, blk_hdr, rawblock)
# See if we can catch up to prior out-of-order blocks
while self.blkCountOut in self.blockExtents:
self.copyOneBlock()
else: # If out-of-order, skip over block data for now
self.blockExtents[blkHeight] = inExtent
if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
# If there is space in the cache, read the data
# Reading the data in file sequence instead of seeking and fetching it later is preferred,
# but we don't want to fill up memory
self.outOfOrderData[blkHeight] = self.inF.read(inLen)
self.outOfOrderSize += inLen
else: # If no space in cache, seek forward
self.inF.seek(inLen, os.SEEK_CUR)
print("Done (%i blocks written)" % (self.blkCountOut))
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-data.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1], encoding="utf8")
for line in f:
# skip comment lines
m = re.search(r'^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
# Force hash byte format setting to be lowercase to make comparisons easier.
# Also place upfront in case any settings need to know about it.
if 'rev_hash_bytes' not in settings:
settings['rev_hash_bytes'] = 'false'
settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()
if 'netmagic' not in settings:
settings['netmagic'] = 'f9beb4d9'
if 'genesis' not in settings:
settings['genesis'] = '000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f'
if 'input' not in settings:
settings['input'] = 'input'
if 'hashlist' not in settings:
settings['hashlist'] = 'hashlist.txt'
if 'file_timestamp' not in settings:
settings['file_timestamp'] = 0
if 'split_timestamp' not in settings:
settings['split_timestamp'] = 0
if 'max_out_sz' not in settings:
settings['max_out_sz'] = 1000 * 1000 * 1000
if 'out_of_order_cache_sz' not in settings:
settings['out_of_order_cache_sz'] = 100 * 1000 * 1000
if 'debug_output' not in settings:
settings['debug_output'] = 'false'
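# For reference, a minimal sketch of a config file using the keys parsed
# above (the input/output paths are illustrative assumptions; the other
# values mirror the defaults):
#
#   netmagic=f9beb4d9
#   genesis=000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f
#   input=/home/example/.bitcoin/blocks
#   hashlist=hashlist.txt
#   output_file=/home/example/bootstrap.dat
#   max_out_sz=1000000000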
settings['max_out_sz'] = int(settings['max_out_sz'])
settings['split_timestamp'] = int(settings['split_timestamp'])
settings['file_timestamp'] = int(settings['file_timestamp'])
settings['netmagic'] = unhexlify(settings['netmagic'].encode('utf-8'))
settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
settings['debug_output'] = settings['debug_output'].lower()
if 'output_file' not in settings and 'output' not in settings:
print("Missing output file / directory")
sys.exit(1)
blkindex = get_block_hashes(settings)
blkmap = mkblockmap(blkindex)
# Block hash map won't be byte-reversed. Neither should the genesis hash.
if settings['genesis'] not in blkmap:
print("Genesis block not found in hashlist")
else:
BlockDataCopier(settings, blkindex, blkmap).run()
|
YingHsuan/termite_data_server
|
refs/heads/master
|
web2py/gluon/contrib/login_methods/motp_auth.py
|
44
|
#!/usr/bin/env python
import time
from hashlib import md5
from gluon.dal import DAL
def motp_auth(db=DAL('sqlite://storage.sqlite'),
time_offset=60):
"""
motp_auth allows you to log in with a one-time password (OTP) generated on a
motp client; motp clients are available for practically all platforms.
To learn more about OTP, visit http://en.wikipedia.org/wiki/One-time_password
To learn more about motp, visit http://motp.sourceforge.net
Written by Madhukar R Pai (madspai@gmail.com)
License : MIT or GPL v2
thanks and credits to the web2py community
To use motp_auth:
motp_auth.py has to be located in the gluon/contrib/login_methods/ folder.
First, auth_user has to have 2 extra fields - motp_secret and motp_pin.
For that, define auth as shown below:
## after auth = Auth(db)
db.define_table(
auth.settings.table_user_name,
Field('first_name', length=128, default=''),
Field('last_name', length=128, default=''),
Field('email', length=128, default='', unique=True), # required
Field('password', 'password', length=512, # required
readable=False, label='Password'),
Field('motp_secret',length=512,default='',
label='MOTP Secret'),
Field('motp_pin',length=128,default='',
label='MOTP PIN'),
Field('registration_key', length=512, # required
writable=False, readable=False, default=''),
Field('reset_password_key', length=512, # required
writable=False, readable=False, default=''),
Field('registration_id', length=512, # required
writable=False, readable=False, default=''))
##validators
custom_auth_table = db[auth.settings.table_user_name]
# get the custom_auth_table
custom_auth_table.first_name.requires = \
IS_NOT_EMPTY(error_message=auth.messages.is_empty)
custom_auth_table.last_name.requires = \
IS_NOT_EMPTY(error_message=auth.messages.is_empty)
custom_auth_table.password.requires = CRYPT()
custom_auth_table.email.requires = [
IS_EMAIL(error_message=auth.messages.invalid_email),
IS_NOT_IN_DB(db, custom_auth_table.email)]
auth.settings.table_user = custom_auth_table # tell auth to use custom_auth_table
## before auth.define_tables()
##after that:
from gluon.contrib.login_methods.motp_auth import motp_auth
auth.settings.login_methods.append(motp_auth(db=db))
##Instructions for using MOTP
- After configuring motp for web2py, install a motp client on your phone (Android, iOS, Java, Windows Phone, etc.)
- Initialize the motp client (to reset a motp secret, type in #**#).
During user creation, enter the secret generated during initialization into the motp_secret field in auth_user, and
similarly enter a pre-decided PIN into motp_pin.
- Done. To log in, just generate a fresh OTP by typing in the PIN, and use the OTP as the password.
###To Dos###
- Both motp_secret and motp_pin are stored in plain text! Some way of encrypting them is needed.
- web2py stores the password in the db on successful login (this should not happen).
- A utility or page to check the OTP would be useful.
- As of now, the user field is hardcoded to email. Some way of selecting the user table and user field is needed.
"""
def verify_otp(otp, pin, secret, offset=60):
epoch_time = int(time.time())
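# motp works in 10-second time steps: stripping the last digit of the Unix
# timestamp below converts seconds into 10-second units, so the loop scans
# the +/- offset window one 10-second step at a time.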
time_start = int(str(epoch_time - offset)[:-1])
time_end = int(str(epoch_time + offset)[:-1])
for t in range(time_start - 1, time_end + 1):
to_hash = str(t) + secret + pin
otp_hash = md5(to_hash).hexdigest()[:6]  # avoid shadowing the built-in hash()
if otp == otp_hash:
return True
return False
def motp_auth_aux(email,
password,
db=db,
offset=time_offset):
if db:
user_data = db(db.auth_user.email == email).select().first()
if user_data:
if user_data['motp_secret'] and user_data['motp_pin']:
motp_secret = user_data['motp_secret']
motp_pin = user_data['motp_pin']
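# The submitted "password" is the one-time password generated by the
# client, as described in the docstring above.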
otp_check = verify_otp(
password, motp_pin, motp_secret, offset=offset)
if otp_check:
return True
else:
return False
else:
return False
return False
return motp_auth_aux
|
gangadharkadam/vervefrappe
|
refs/heads/v5.0
|
frappe/workflow/__init__.py
|
12133432
| |
abhiii5459/sympy
|
refs/heads/master
|
sympy/solvers/benchmarks/__init__.py
|
12133432
| |
Ingenico-ePayments/connect-sdk-python2
|
refs/heads/master
|
tests/integration/test_connection_pooling.py
|
1
|
import unittest
import threading
import timeit
import tests.integration.init_utils as init_utils
from tests.integration.init_utils import MERCHANT_ID
from ingenico.connect.sdk.merchant.services.convert_amount_params import ConvertAmountParams
from ingenico.connect.sdk.factory import Factory
class ConnectionPoolingTest(unittest.TestCase):
"""Performs multiple threaded server requests with connection pooling in order to test thread-safety and concurrency
"""
def setUp(self):
self.flag = threading.Event() # flag to synchronise a start moment for the threads
self.result_list = [] # list to collect results from the threads
self.lock = threading.RLock() # mutex lock for the threads to provide concurrent access to the result list
def test_connection_pool_max_is_count(self):
"""Test with one pool per request"""
self.run_connection_pooling_test(10, 10)
def test_connection_pool_max_is_half(self):
"""Test with one pool per two requests"""
self.run_connection_pooling_test(10, 5)
def test_connection_pool_max_is_one(self):
"""Test with one pool for all 10 requests"""
self.run_connection_pooling_test(10, 1)
def run_connection_pooling_test(self, request_count, max_connections):
"""Sends *request_count* requests with a maximum number of connection pools equal to *max_connections*"""
communicator_configuration = init_utils.create_communicator_configuration(max_connections=max_connections)
with Factory.create_communicator_from_configuration(communicator_configuration) as communicator:
# Create a number of runner threads that will execute send_request
runner_threads = [
threading.Thread(target=self.send_request, args=(i, communicator)) for i in range(0, request_count)
]
for thread in runner_threads:
thread.start()
self.flag.set()
# wait until threads are done before closing the communicator
for thread in runner_threads:
thread.join()
print "(*start time*, *end time*) for {} connection pools".format(max_connections)
for item in self.result_list:
if isinstance(item, Exception):
self.fail("an exception occurred in one of the threads:/n" + str(item))
else:
print repr(item)
# check server logs for information about concurrent use of connections
def send_request(self, i, communicator):
"""runs a (concurrent) request"""
request = ConvertAmountParams()
request.source = "USD"
request.target = "EUR"
request.amount = (100L * (i + 1))
try:
client = Factory.create_client_from_communicator(communicator)
self.flag.wait()
start_time = timeit.default_timer()
dummy = client.merchant(MERCHANT_ID).services().convert_amount(request).converted_amount
end_time = timeit.default_timer()
with self.lock:
self.result_list.append((start_time, end_time))
except Exception as e:
with self.lock:
self.result_list.append(e)
# check server logs for additional data about the requests sent
if __name__ == '__main__':
unittest.main()
|
metashell/metashell
|
refs/heads/master
|
3rd/templight/llvm/utils/lit/tests/Inputs/fake-externals/fake_external.py
|
17
|
import os
import sys
def execute(fileName):
sys.stderr.write("error: external '{}' command called unexpectedly\n"
.format(os.path.basename(fileName)))
sys.exit(1)
|
xiangke/pycopia
|
refs/heads/master
|
core/pycopia/OS/Linux/__init__.py
|
1
|
#!/usr/bin/python2.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
#
# $Id$
#
# Copyright (C) 1999-2006 Keith Dart <keith@kdart.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
"""
This package contains modules specific to exploiting Linux kernel features or
drivers.
"""
__all__ = ['IOCTL', 'procfs', 'rtc', 'sysctl', 'Input']
|
xin3liang/platform_external_chromium_org_third_party_WebKit
|
refs/heads/master
|
Tools/Scripts/webkitpy/common/system/filesystem_mock_unittest.py
|
59
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
import unittest
from webkitpy.common.system import filesystem_mock
from webkitpy.common.system import filesystem_unittest
class MockFileSystemTest(unittest.TestCase, filesystem_unittest.GenericFileSystemTests):
def setUp(self):
self.fs = filesystem_mock.MockFileSystem()
self.setup_generic_test_dir()
def tearDown(self):
self.teardown_generic_test_dir()
self.fs = None
def quick_check(self, test_fn, good_fn, *tests):
for test in tests:
if hasattr(test, '__iter__'):
expected = good_fn(*test)
actual = test_fn(*test)
else:
expected = good_fn(test)
actual = test_fn(test)
self.assertEqual(expected, actual, 'given %s, expected %s, got %s' % (repr(test), repr(expected), repr(actual)))
def test_join(self):
self.quick_check(self.fs.join,
self.fs._slow_but_correct_join,
('',),
('', 'bar'),
('foo',),
('foo/',),
('foo', ''),
('foo/', ''),
('foo', 'bar'),
('foo', '/bar'),
)
def test_normpath(self):
self.quick_check(self.fs.normpath,
self.fs._slow_but_correct_normpath,
'',
'/',
'.',
'/.',
'foo',
'foo/',
'foo/.',
'foo/bar',
'/foo',
'foo/../bar',
'foo/../bar/baz',
'../foo')
def test_relpath_win32(self):
pass
|
Paul-Ezell/cinder-1
|
refs/heads/master
|
cinder/tests/unit/api/v2/test_limits.py
|
18
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests dealing with HTTP rate-limiting.
"""
from xml.dom import minidom
from lxml import etree
from oslo_serialization import jsonutils
import six
from six.moves import http_client
from six.moves import range
import webob
from cinder.api.v2 import limits
from cinder.api import views
from cinder.api import xmlutil
import cinder.context
from cinder import test
TEST_LIMITS = [
limits.Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE),
limits.Limit("POST", "*", ".*", 7, limits.PER_MINUTE),
limits.Limit("POST", "/volumes", "^/volumes", 3, limits.PER_MINUTE),
limits.Limit("PUT", "*", "", 10, limits.PER_MINUTE),
limits.Limit("PUT", "/volumes", "^/volumes", 5, limits.PER_MINUTE),
]
NS = {
'atom': 'http://www.w3.org/2005/Atom',
'ns': 'http://docs.openstack.org/common/api/v1.0',
}
class BaseLimitTestSuite(test.TestCase):
"""Base test suite which provides relevant stubs and time abstraction."""
def setUp(self):
super(BaseLimitTestSuite, self).setUp()
self.time = 0.0
self.stubs.Set(limits.Limit, "_get_time", self._get_time)
self.absolute_limits = {}
def stub_get_project_quotas(context, project_id, usages=True):
return {k: dict(limit=v) for k, v in self.absolute_limits.items()}
self.stubs.Set(cinder.quota.QUOTAS, "get_project_quotas",
stub_get_project_quotas)
def _get_time(self):
"""Return the "time" according to this test suite."""
return self.time
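# Tests advance the clock manually via "self.time += n"; limits.Limit reads
# time through the stubbed _get_time above, so rate-limit windows behave
# deterministically.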
class LimitsControllerTest(BaseLimitTestSuite):
"""Tests for `limits.LimitsController` class."""
def setUp(self):
"""Run before each test."""
super(LimitsControllerTest, self).setUp()
self.controller = limits.create_resource()
def _get_index_request(self, accept_header="application/json"):
"""Helper to set routing arguments."""
request = webob.Request.blank("/")
request.accept = accept_header
request.environ["wsgiorg.routing_args"] = (None, {
"action": "index",
"controller": "",
})
context = cinder.context.RequestContext('testuser', 'testproject')
request.environ["cinder.context"] = context
return request
def _populate_limits(self, request):
"""Put limit info into a request."""
_limits = [
limits.Limit("GET", "*", ".*", 10, 60).display(),
limits.Limit("POST", "*", ".*", 5, 60 * 60).display(),
limits.Limit("GET", "changes-since*", "changes-since",
5, 60).display(),
]
request.environ["cinder.limits"] = _limits
return request
def test_empty_index_json(self):
"""Test getting empty limit details in JSON."""
request = self._get_index_request()
response = request.get_response(self.controller)
expected = {
"limits": {
"rate": [],
"absolute": {},
},
}
body = jsonutils.loads(response.body)
self.assertEqual(expected, body)
def test_index_json(self):
"""Test getting limit details in JSON."""
request = self._get_index_request()
request = self._populate_limits(request)
self.absolute_limits = {
'gigabytes': 512,
'volumes': 5,
}
response = request.get_response(self.controller)
expected = {
"limits": {
"rate": [
{
"regex": ".*",
"uri": "*",
"limit": [
{
"verb": "GET",
"next-available": "1970-01-01T00:00:00",
"unit": "MINUTE",
"value": 10,
"remaining": 10,
},
{
"verb": "POST",
"next-available": "1970-01-01T00:00:00",
"unit": "HOUR",
"value": 5,
"remaining": 5,
},
],
},
{
"regex": "changes-since",
"uri": "changes-since*",
"limit": [
{
"verb": "GET",
"next-available": "1970-01-01T00:00:00",
"unit": "MINUTE",
"value": 5,
"remaining": 5,
},
],
},
],
"absolute": {"maxTotalVolumeGigabytes": 512,
"maxTotalVolumes": 5, },
},
}
body = jsonutils.loads(response.body)
self.assertEqual(expected, body)
def _populate_limits_diff_regex(self, request):
"""Put limit info into a request."""
_limits = [
limits.Limit("GET", "*", ".*", 10, 60).display(),
limits.Limit("GET", "*", "*.*", 10, 60).display(),
]
request.environ["cinder.limits"] = _limits
return request
def test_index_diff_regex(self):
"""Test getting limit details in JSON."""
request = self._get_index_request()
request = self._populate_limits_diff_regex(request)
response = request.get_response(self.controller)
expected = {
"limits": {
"rate": [
{
"regex": ".*",
"uri": "*",
"limit": [
{
"verb": "GET",
"next-available": "1970-01-01T00:00:00",
"unit": "MINUTE",
"value": 10,
"remaining": 10,
},
],
},
{
"regex": "*.*",
"uri": "*",
"limit": [
{
"verb": "GET",
"next-available": "1970-01-01T00:00:00",
"unit": "MINUTE",
"value": 10,
"remaining": 10,
},
],
},
],
"absolute": {},
},
}
body = jsonutils.loads(response.body)
self.assertEqual(expected, body)
def _test_index_absolute_limits_json(self, expected):
request = self._get_index_request()
response = request.get_response(self.controller)
body = jsonutils.loads(response.body)
self.assertEqual(expected, body['limits']['absolute'])
def test_index_ignores_extra_absolute_limits_json(self):
self.absolute_limits = {'unknown_limit': 9001}
self._test_index_absolute_limits_json({})
class TestLimiter(limits.Limiter):
pass
class LimitMiddlewareTest(BaseLimitTestSuite):
"""Tests for the `limits.RateLimitingMiddleware` class."""
@webob.dec.wsgify
def _empty_app(self, request):
"""Do-nothing WSGI app."""
pass
def setUp(self):
"""Prepare middleware for use through fake WSGI app."""
super(LimitMiddlewareTest, self).setUp()
_limits = '(GET, *, .*, 1, MINUTE)'
self.app = limits.RateLimitingMiddleware(self._empty_app, _limits,
"%s.TestLimiter" %
self.__class__.__module__)
def test_limit_class(self):
"""Test that middleware selected correct limiter class."""
assert isinstance(self.app._limiter, TestLimiter)
def test_good_request(self):
"""Test successful GET request through middleware."""
request = webob.Request.blank("/")
response = request.get_response(self.app)
self.assertEqual(200, response.status_int)
def test_limited_request_json(self):
"""Test a rate-limited (413) GET request through middleware."""
request = webob.Request.blank("/")
response = request.get_response(self.app)
self.assertEqual(200, response.status_int)
request = webob.Request.blank("/")
response = request.get_response(self.app)
self.assertEqual(413, response.status_int)
self.assertIn('Retry-After', response.headers)
retry_after = int(response.headers['Retry-After'])
self.assertAlmostEqual(retry_after, 60, 1)
body = jsonutils.loads(response.body)
expected = "Only 1 GET request(s) can be made to * every minute."
value = body["overLimitFault"]["details"].strip()
self.assertEqual(expected, value)
def test_limited_request_xml(self):
"""Test a rate-limited (413) response as XML."""
request = webob.Request.blank("/")
response = request.get_response(self.app)
self.assertEqual(200, response.status_int)
request = webob.Request.blank("/")
request.accept = "application/xml"
response = request.get_response(self.app)
self.assertEqual(413, response.status_int)
root = minidom.parseString(response.body).childNodes[0]
expected = "Only 1 GET request(s) can be made to * every minute."
details = root.getElementsByTagName("details")
self.assertEqual(1, details.length)
value = details.item(0).firstChild.data.strip()
self.assertEqual(expected, value)
class LimitTest(BaseLimitTestSuite):
"""Tests for the `limits.Limit` class."""
def test_GET_no_delay(self):
"""Test a limit handles 1 GET per second."""
limit = limits.Limit("GET", "*", ".*", 1, 1)
delay = limit("GET", "/anything")
self.assertIsNone(delay)
self.assertEqual(0, limit.next_request)
self.assertEqual(0, limit.last_request)
def test_GET_delay(self):
"""Test two calls to 1 GET per second limit."""
limit = limits.Limit("GET", "*", ".*", 1, 1)
delay = limit("GET", "/anything")
self.assertIsNone(delay)
delay = limit("GET", "/anything")
self.assertEqual(1, delay)
self.assertEqual(1, limit.next_request)
self.assertEqual(0, limit.last_request)
self.time += 4
delay = limit("GET", "/anything")
self.assertIsNone(delay)
self.assertEqual(4, limit.next_request)
self.assertEqual(4, limit.last_request)
class ParseLimitsTest(BaseLimitTestSuite):
"""Tests for the default limits parser in the `limits.Limiter` class."""
def test_invalid(self):
"""Test that parse_limits() handles invalid input correctly."""
self.assertRaises(ValueError, limits.Limiter.parse_limits,
';;;;;')
def test_bad_rule(self):
"""Test that parse_limits() handles bad rules correctly."""
self.assertRaises(ValueError, limits.Limiter.parse_limits,
'GET, *, .*, 20, minute')
def test_missing_arg(self):
"""Test that parse_limits() handles missing args correctly."""
self.assertRaises(ValueError, limits.Limiter.parse_limits,
'(GET, *, .*, 20)')
def test_bad_value(self):
"""Test that parse_limits() handles bad values correctly."""
self.assertRaises(ValueError, limits.Limiter.parse_limits,
'(GET, *, .*, foo, minute)')
def test_bad_unit(self):
"""Test that parse_limits() handles bad units correctly."""
self.assertRaises(ValueError, limits.Limiter.parse_limits,
'(GET, *, .*, 20, lightyears)')
def test_multiple_rules(self):
"""Test that parse_limits() handles multiple rules correctly."""
try:
l = limits.Limiter.parse_limits('(get, *, .*, 20, minute);'
'(PUT, /foo*, /foo.*, 10, hour);'
'(POST, /bar*, /bar.*, 5, second);'
'(Say, /derp*, /derp.*, 1, day)')
except ValueError as e:
assert False, six.text_type(e)
# Make sure the number of returned limits are correct
self.assertEqual(4, len(l))
# Check all the verbs...
expected = ['GET', 'PUT', 'POST', 'SAY']
self.assertEqual(expected, [t.verb for t in l])
# ...the URIs...
expected = ['*', '/foo*', '/bar*', '/derp*']
self.assertEqual(expected, [t.uri for t in l])
# ...the regexes...
expected = ['.*', '/foo.*', '/bar.*', '/derp.*']
self.assertEqual(expected, [t.regex for t in l])
# ...the values...
expected = [20, 10, 5, 1]
self.assertEqual(expected, [t.value for t in l])
# ...and the units...
expected = [limits.PER_MINUTE, limits.PER_HOUR,
limits.PER_SECOND, limits.PER_DAY]
self.assertEqual(expected, [t.unit for t in l])
class LimiterTest(BaseLimitTestSuite):
"""Tests for the in-memory `limits.Limiter` class."""
def setUp(self):
"""Run before each test."""
super(LimiterTest, self).setUp()
userlimits = {'limits.user3': '',
'limits.user0': '(get, *, .*, 4, minute);'
'(put, *, .*, 2, minute)'}
self.limiter = limits.Limiter(TEST_LIMITS, **userlimits)
def _check(self, num, verb, url, username=None):
"""Check and yield results from checks."""
for x in range(num):
yield self.limiter.check_for_delay(verb, url, username)[0]
def _check_sum(self, num, verb, url, username=None):
"""Check and sum results from checks."""
results = self._check(num, verb, url, username)
return sum(item for item in results if item)
def test_no_delay_GET(self):
"""Ensure no delay on a single call for a limit verb we didn't set."""
delay = self.limiter.check_for_delay("GET", "/anything")
self.assertEqual((None, None), delay)
def test_no_delay_PUT(self):
"""Ensure no delay on a single call for a known limit."""
delay = self.limiter.check_for_delay("PUT", "/anything")
self.assertEqual((None, None), delay)
def test_delay_PUT(self):
"""Test delay on 11th PUT request.
Ensure the 11th PUT will result in a delay of 6.0 seconds until
the next request will be granted.
"""
expected = [None] * 10 + [6.0]
results = list(self._check(11, "PUT", "/anything"))
self.assertEqual(expected, results)
def test_delay_POST(self):
"""Test delay on 8th POST request.
Ensure the 8th POST will result in a delay of 6.0 seconds until
the next request will be granted.
"""
expected = [None] * 7
results = list(self._check(7, "POST", "/anything"))
self.assertEqual(expected, results)
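# At 7 POSTs per minute, each request consumes a 60/7 ~= 8.57 s slot, so
# the 8th request must wait that long.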
expected = 60.0 / 7.0
results = self._check_sum(1, "POST", "/anything")
self.assertAlmostEqual(expected, results, 8)
def test_delay_GET(self):
"""Ensure the 11th GET will result in NO delay."""
expected = [None] * 11
results = list(self._check(11, "GET", "/anything"))
self.assertEqual(expected, results)
expected = [None] * 4 + [15.0]
results = list(self._check(5, "GET", "/foo", "user0"))
self.assertEqual(expected, results)
def test_delay_PUT_volumes(self):
"""Test delay on /volumes.
Ensure PUT on /volumes limits at 5 requests, and PUT elsewhere
is still OK after 5 requests...but then after 11 total requests,
PUT limiting kicks in.
"""
# First 6 requests on PUT /volumes
expected = [None] * 5 + [12.0]
results = list(self._check(6, "PUT", "/volumes"))
self.assertEqual(expected, results)
# Next 5 request on PUT /anything
expected = [None] * 4 + [6.0]
results = list(self._check(5, "PUT", "/anything"))
self.assertEqual(expected, results)
def test_delay_PUT_wait(self):
"""Test limit is lifted again.
Ensure after hitting the limit and then waiting for
the correct amount of time, the limit will be lifted.
"""
expected = [None] * 10 + [6.0]
results = list(self._check(11, "PUT", "/anything"))
self.assertEqual(expected, results)
# Advance time
self.time += 6.0
expected = [None, 6.0]
results = list(self._check(2, "PUT", "/anything"))
self.assertEqual(expected, results)
def test_multiple_delays(self):
"""Ensure multiple requests still get a delay."""
expected = [None] * 10 + [6.0] * 10
results = list(self._check(20, "PUT", "/anything"))
self.assertEqual(expected, results)
self.time += 1.0
expected = [5.0] * 10
results = list(self._check(10, "PUT", "/anything"))
self.assertEqual(expected, results)
expected = [None] * 2 + [30.0] * 8
results = list(self._check(10, "PUT", "/anything", "user0"))
self.assertEqual(expected, results)
def test_user_limit(self):
"""Test user-specific limits."""
self.assertEqual([], self.limiter.levels['user3'])
self.assertEqual(2, len(self.limiter.levels['user0']))
def test_multiple_users(self):
"""Tests involving multiple users."""
# User0
expected = [None] * 2 + [30.0] * 8
results = list(self._check(10, "PUT", "/anything", "user0"))
self.assertEqual(expected, results)
# User1
expected = [None] * 10 + [6.0] * 10
results = list(self._check(20, "PUT", "/anything", "user1"))
self.assertEqual(expected, results)
# User2
expected = [None] * 10 + [6.0] * 5
results = list(self._check(15, "PUT", "/anything", "user2"))
self.assertEqual(expected, results)
# User3
expected = [None] * 20
results = list(self._check(20, "PUT", "/anything", "user3"))
self.assertEqual(expected, results)
self.time += 1.0
# User1 again
expected = [5.0] * 10
results = list(self._check(10, "PUT", "/anything", "user1"))
self.assertEqual(expected, results)
self.time += 1.0
# User1 again
expected = [4.0] * 5
results = list(self._check(5, "PUT", "/anything", "user2"))
self.assertEqual(expected, results)
# User0 again
expected = [28.0]
results = list(self._check(1, "PUT", "/anything", "user0"))
self.assertEqual(expected, results)
self.time += 28.0
expected = [None, 30.0]
results = list(self._check(2, "PUT", "/anything", "user0"))
self.assertEqual(expected, results)
class WsgiLimiterTest(BaseLimitTestSuite):
"""Tests for `limits.WsgiLimiter` class."""
def setUp(self):
"""Run before each test."""
super(WsgiLimiterTest, self).setUp()
self.app = limits.WsgiLimiter(TEST_LIMITS)
def _request_data(self, verb, path):
"""Get data describing a limit request verb/path."""
return jsonutils.dumps({"verb": verb, "path": path})
def _request(self, verb, url, username=None):
"""POST request to given url by given username.
Make sure that POSTing to the given url causes the given username
to perform the given action. Make the internal rate limiter return
delay and make sure that the WSGI app returns the correct response.
"""
if username:
request = webob.Request.blank("/%s" % username)
else:
request = webob.Request.blank("/")
request.method = "POST"
request.body = self._request_data(verb, url)
response = request.get_response(self.app)
if "X-Wait-Seconds" in response.headers:
self.assertEqual(403, response.status_int)
return response.headers["X-Wait-Seconds"]
self.assertEqual(204, response.status_int)
def test_invalid_methods(self):
"""Only POSTs should work."""
for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]:
request = webob.Request.blank("/", method=method)
response = request.get_response(self.app)
self.assertEqual(405, response.status_int)
def test_good_url(self):
delay = self._request("GET", "/something")
self.assertIsNone(delay)
def test_escaping(self):
delay = self._request("GET", "/something/jump%20up")
self.assertIsNone(delay)
def test_response_to_delays(self):
delay = self._request("GET", "/delayed")
self.assertIsNone(delay)
delay = self._request("GET", "/delayed")
self.assertEqual('60.00', delay)
def test_response_to_delays_usernames(self):
delay = self._request("GET", "/delayed", "user1")
self.assertIsNone(delay)
delay = self._request("GET", "/delayed", "user2")
self.assertIsNone(delay)
delay = self._request("GET", "/delayed", "user1")
self.assertEqual('60.00', delay)
delay = self._request("GET", "/delayed", "user2")
self.assertEqual('60.00', delay)
class FakeHttplibSocket(object):
"""Fake `http_client.HTTPResponse` replacement."""
def __init__(self, response_string):
"""Initialize new `FakeHttplibSocket`."""
self._buffer = six.StringIO(response_string)
def makefile(self, _mode, _other):
"""Returns the socket's internal buffer."""
return self._buffer
class FakeHttplibConnection(object):
"""Fake `http_client.HTTPConnection`."""
def __init__(self, app, host):
"""Initialize `FakeHttplibConnection`."""
self.app = app
self.host = host
def request(self, method, path, body="", headers=None):
"""Fake request handler.
Requests made via this connection actually get translated and
routed into our WSGI app, we then wait for the response and turn
it back into an `http_client.HTTPResponse`.
"""
if not headers:
headers = {}
req = webob.Request.blank(path)
req.method = method
req.headers = headers
req.host = self.host
req.body = body
resp = str(req.get_response(self.app))
resp = "HTTP/1.0 %s" % resp
sock = FakeHttplibSocket(resp)
self.http_response = http_client.HTTPResponse(sock)
self.http_response.begin()
def getresponse(self):
"""Return our generated response from the request."""
return self.http_response
def wire_HTTPConnection_to_WSGI(host, app):
"""Monkeypatches HTTPConnection.
Monkeypatches HTTPConnection so that if you try to connect to host, you
are instead routed straight to the given WSGI app.
After calling this method, when any code calls
http_client.HTTPConnection(host)
the connection object will be a fake. Its requests will be sent directly
to the given WSGI app rather than through a socket.
Code connecting to hosts other than host will not be affected.
This method may be called multiple times to map different hosts to
different apps.
This method returns the original HTTPConnection object, so that the caller
can restore the default HTTPConnection interface (for all hosts).
"""
class HTTPConnectionDecorator(object):
"""Decorator to mock the HTTPConecction class.
Wraps the real HTTPConnection class so that when you instantiate
the class you might instead get a fake instance.
"""
def __init__(self, wrapped):
self.wrapped = wrapped
def __call__(self, connection_host, *args, **kwargs):
if connection_host == host:
return FakeHttplibConnection(app, host)
else:
return self.wrapped(connection_host, *args, **kwargs)
oldHTTPConnection = http_client.HTTPConnection
new_http_connection = HTTPConnectionDecorator(http_client.HTTPConnection)
http_client.HTTPConnection = new_http_connection
return oldHTTPConnection
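# A minimal usage sketch (the host name and app are illustrative): route
# requests for one host into a WSGI app, then restore the real class.
#
#   old = wire_HTTPConnection_to_WSGI("fake.host:80", wsgi_app)
#   conn = http_client.HTTPConnection("fake.host:80")  # returns a fake
#   ...
#   http_client.HTTPConnection = old  # restore the original class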
class WsgiLimiterProxyTest(BaseLimitTestSuite):
"""Tests for the `limits.WsgiLimiterProxy` class."""
def setUp(self):
"""setUp() for WsgiLimiterProxyTest.
Do some nifty HTTP/WSGI magic which allows for WSGI to be called
directly by something like the `http_client` library.
"""
super(WsgiLimiterProxyTest, self).setUp()
self.app = limits.WsgiLimiter(TEST_LIMITS)
oldHTTPConnection = (
wire_HTTPConnection_to_WSGI("169.254.0.1:80", self.app))
self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80")
self.addCleanup(self._restore, oldHTTPConnection)
def _restore(self, oldHTTPConnection):
# restore original HTTPConnection object
http_client.HTTPConnection = oldHTTPConnection
def test_200(self):
"""Successful request test."""
delay = self.proxy.check_for_delay("GET", "/anything")
self.assertEqual((None, None), delay)
def test_403(self):
"""Forbidden request test."""
delay = self.proxy.check_for_delay("GET", "/delayed")
self.assertEqual((None, None), delay)
delay, error = self.proxy.check_for_delay("GET", "/delayed")
error = error.strip()
expected = ("60.00", "403 Forbidden\n\nOnly 1 GET request(s) can be "
"made to /delayed every minute.")
self.assertEqual(expected, (delay, error))
class LimitsViewBuilderTest(test.TestCase):
def setUp(self):
super(LimitsViewBuilderTest, self).setUp()
self.view_builder = views.limits.ViewBuilder()
self.rate_limits = [{"URI": "*",
"regex": ".*",
"value": 10,
"verb": "POST",
"remaining": 2,
"unit": "MINUTE",
"resetTime": 1311272226},
{"URI": "*/volumes",
"regex": "^/volumes",
"value": 50,
"verb": "POST",
"remaining": 10,
"unit": "DAY",
"resetTime": 1311272226}]
self.absolute_limits = {"metadata_items": 1,
"injected_files": 5,
"injected_file_content_bytes": 5}
def test_build_limits(self):
tdate = "2011-07-21T18:17:06"
expected_limits = {
"limits": {"rate": [{"uri": "*",
"regex": ".*",
"limit": [{"value": 10,
"verb": "POST",
"remaining": 2,
"unit": "MINUTE",
"next-available": tdate}]},
{"uri": "*/volumes",
"regex": "^/volumes",
"limit": [{"value": 50,
"verb": "POST",
"remaining": 10,
"unit": "DAY",
"next-available": tdate}]}],
"absolute": {"maxServerMeta": 1,
"maxImageMeta": 1,
"maxPersonality": 5,
"maxPersonalitySize": 5}}}
output = self.view_builder.build(self.rate_limits,
self.absolute_limits)
self.assertDictMatch(expected_limits, output)
def test_build_limits_empty_limits(self):
expected_limits = {"limits": {"rate": [],
"absolute": {}}}
abs_limits = {}
rate_limits = []
output = self.view_builder.build(rate_limits, abs_limits)
self.assertDictMatch(expected_limits, output)
class LimitsXMLSerializationTest(test.TestCase):
def test_xml_declaration(self):
serializer = limits.LimitsTemplate()
fixture = {"limits": {
"rate": [],
"absolute": {}}}
output = serializer.serialize(fixture)
has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
self.assertTrue(has_dec)
def test_index(self):
tdate = "2011-12-15T22:42:45Z"
serializer = limits.LimitsTemplate()
fixture = {"limits": {"rate": [{"uri": "*",
"regex": ".*",
"limit": [{"value": 10,
"verb": "POST",
"remaining": 2,
"unit": "MINUTE",
"next-available": tdate}]},
{"uri": "*/servers",
"regex": "^/servers",
"limit": [{"value": 50,
"verb": "POST",
"remaining": 10,
"unit": "DAY",
"next-available": tdate}]}],
"absolute": {"maxServerMeta": 1,
"maxImageMeta": 1,
"maxPersonality": 5,
"maxPersonalitySize": 10240}}}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'limits')
# verify absolute limits
absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS)
self.assertEqual(4, len(absolutes))
for limit in absolutes:
name = limit.get('name')
value = limit.get('value')
self.assertEqual(str(fixture['limits']['absolute'][name]), value)
# verify rate limits
rates = root.xpath('ns:rates/ns:rate', namespaces=NS)
self.assertEqual(2, len(rates))
for i, rate in enumerate(rates):
for key in ['uri', 'regex']:
self.assertEqual(str(fixture['limits']['rate'][i][key]),
rate.get(key))
rate_limits = rate.xpath('ns:limit', namespaces=NS)
self.assertEqual(1, len(rate_limits))
for j, limit in enumerate(rate_limits):
for key in ['verb', 'value', 'remaining', 'unit',
'next-available']:
self.assertEqual(
str(fixture['limits']['rate'][i]['limit'][j][key]),
limit.get(key))
def test_index_no_limits(self):
serializer = limits.LimitsTemplate()
fixture = {"limits": {
"rate": [],
"absolute": {}}}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'limits')
# verify absolute limits
absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS)
self.assertEqual(0, len(absolutes))
# verify rate limits
rates = root.xpath('ns:rates/ns:rate', namespaces=NS)
self.assertEqual(0, len(rates))
|
caveman-dick/ansible
|
refs/heads/devel
|
lib/ansible/modules/packaging/os/pulp_repo.py
|
33
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Joe Adams <@sysadmind>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pulp_repo
author: "Joe Adams (@sysadmind)"
short_description: Add or remove Pulp repos from a remote host.
description:
- Add or remove Pulp repos from a remote host.
version_added: "2.3"
requirements: []
options:
add_export_distributor:
description:
- Whether or not to add the export distributor to new C(rpm) repositories.
required: false
default: false
feed:
description:
- Upstream feed URL to receive updates from.
required: false
default: null
force_basic_auth:
description:
- httplib2, the library used by the M(uri) module only sends
authentication information when a webservice responds to an initial
request with a 401 status. Since some basic auth services do not
properly send a 401, logins will fail. This option forces the sending of
the Basic authentication header upon initial request.
required: false
choices: [ "yes", "no" ]
default: "no"
importer_ssl_ca_cert:
description:
- CA certificate string used to validate the feed source SSL certificate.
This can be the file content or the path to the file.
required: false
default: null
importer_ssl_client_cert:
description:
- Certificate used as the client certificate when synchronizing the
repository. This is used to communicate authentication information to
the feed source. The value to this option must be the full path to the
certificate. The specified file may be the certificate itself or a
single file containing both the certificate and private key. This can be
the file content or the path to the file.
required: false
default: null
importer_ssl_client_key:
description:
- Private key to the certificate specified in I(importer_ssl_client_cert),
assuming it is not included in the certificate file itself. This can be
the file content or the path to the file.
required: false
default: null
name:
description:
- Name of the repo to add or remove. This correlates to repo-id in Pulp.
required: true
proxy_host:
description:
- Proxy url setting for the pulp repository importer. This is in the
format scheme://host.
required: false
default: null
proxy_port:
description:
- Proxy port setting for the pulp repository importer.
required: false
default: null
publish_distributor:
description:
- Distributor to use when state is C(publish). The default is to
publish all distributors.
required: false
pulp_host:
description:
- URL of the pulp server to connect to.
default: https://127.0.0.1
relative_url:
description:
- Relative URL for the local repository. Required when C(state) is
C(present).
required: false
default: null
repo_type:
description:
- Repo plugin type to use (i.e. C(rpm), C(docker)).
default: rpm
serve_http:
description:
- Make the repo available over HTTP.
required: false
default: false
serve_https:
description:
- Make the repo available over HTTPS.
required: false
default: true
state:
description:
- The repo state. A state of C(sync) will queue a sync of the repo.
This is asynchronous but not delayed like a scheduled sync. A state of
C(publish) will use the repository's distributor to publish the content.
required: false
default: present
choices: [ "present", "absent", "sync", "publish" ]
url_password:
description:
- The password for use in HTTP basic authentication to the pulp API.
If the I(url_username) parameter is not specified, the I(url_password)
parameter will not be used.
required: false
url_username:
description:
- The username for use in HTTP basic authentication to the pulp API.
required: false
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be
used on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: [ "yes", "no" ]
wait_for_completion:
description:
- Wait for asynchronous tasks to complete before returning.
required: false
default: 'no'
choices: [ "yes", "no" ]
notes:
- This module can currently only create distributors and importers on rpm
repositories. Contributions to support other repo types are welcome.
'''
EXAMPLES = '''
- name: Create a new repo with name 'my_repo'
pulp_repo:
name: my_repo
relative_url: my/repo
state: present
- name: Create a repo with a feed and a relative URL
pulp_repo:
name: my_centos_updates
repo_type: rpm
feed: http://mirror.centos.org/centos/6/updates/x86_64/
relative_url: centos/6/updates
url_username: admin
url_password: admin
force_basic_auth: yes
state: present
- name: Remove a repo from the pulp server
pulp_repo:
name: my_old_repo
repo_type: rpm
state: absent
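# An additional illustrative example (repo name assumed), queueing an
# asynchronous sync and waiting for it to finish:
- name: Queue a sync of the repo 'my_repo' and wait for completion
pulp_repo:
name: my_repo
repo_type: rpm
state: sync
wait_for_completion: yes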
'''
RETURN = '''
repo:
description: Name of the repo that the action was performed on.
returned: success
type: string
sample: my_repo
'''
import json
import os
from time import sleep
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.urls import url_argument_spec
class pulp_server(object):
"""
Class to interact with a Pulp server
"""
def __init__(self, module, pulp_host, repo_type, wait_for_completion=False):
self.module = module
self.host = pulp_host
self.repo_type = repo_type
self.repo_cache = dict()
self.wait_for_completion = wait_for_completion
def check_repo_exists(self, repo_id):
try:
self.get_repo_config_by_id(repo_id)
except IndexError:
return False
else:
return True
def compare_repo_distributor_config(self, repo_id, **kwargs):
repo_config = self.get_repo_config_by_id(repo_id)
for distributor in repo_config['distributors']:
for key, value in kwargs.items():
if not distributor['config'][key] == value:
return False
return True
def compare_repo_importer_config(self, repo_id, **kwargs):
repo_config = self.get_repo_config_by_id(repo_id)
for importer in repo_config['importers']:
for key, value in kwargs.items():
if value is not None:
if key not in importer['config'].keys():
return False
if not importer['config'][key] == value:
return False
return True
def create_repo(
self,
repo_id,
relative_url,
feed=None,
serve_http=False,
serve_https=True,
proxy_host=None,
proxy_port=None,
ssl_ca_cert=None,
ssl_client_cert=None,
ssl_client_key=None,
add_export_distributor=False
):
url = "%s/pulp/api/v2/repositories/" % self.host
data = dict()
data['id'] = repo_id
data['distributors'] = []
if self.repo_type == 'rpm':
yum_distributor = dict()
yum_distributor['distributor_id'] = "yum_distributor"
yum_distributor['distributor_type_id'] = "yum_distributor"
yum_distributor['auto_publish'] = True
yum_distributor['distributor_config'] = dict()
yum_distributor['distributor_config']['http'] = serve_http
yum_distributor['distributor_config']['https'] = serve_https
yum_distributor['distributor_config']['relative_url'] = relative_url
data['distributors'].append(yum_distributor)
if add_export_distributor:
export_distributor = dict()
export_distributor['distributor_id'] = "export_distributor"
export_distributor['distributor_type_id'] = "export_distributor"
export_distributor['auto_publish'] = False
export_distributor['distributor_config'] = dict()
export_distributor['distributor_config']['http'] = serve_http
export_distributor['distributor_config']['https'] = serve_https
export_distributor['distributor_config']['relative_url'] = relative_url
data['distributors'].append(export_distributor)
data['importer_type_id'] = "yum_importer"
data['importer_config'] = dict()
if feed:
data['importer_config']['feed'] = feed
if proxy_host:
data['importer_config']['proxy_host'] = proxy_host
if proxy_port:
data['importer_config']['proxy_port'] = proxy_port
if ssl_ca_cert:
data['importer_config']['ssl_ca_cert'] = ssl_ca_cert
if ssl_client_cert:
data['importer_config']['ssl_client_cert'] = ssl_client_cert
if ssl_client_key:
data['importer_config']['ssl_client_key'] = ssl_client_key
data['notes'] = {
"_repo-type": "rpm-repo"
}
response, info = fetch_url(
self.module,
url,
data=json.dumps(data),
method='POST')
if info['status'] != 201:
self.module.fail_json(
msg="Failed to create repo.",
status_code=info['status'],
response=info['msg'],
url=url)
else:
return True
def delete_repo(self, repo_id):
url = "%s/pulp/api/v2/repositories/%s/" % (self.host, repo_id)
response, info = fetch_url(self.module, url, data='', method='DELETE')
if info['status'] != 202:
self.module.fail_json(
msg="Failed to delete repo.",
status_code=info['status'],
response=info['msg'],
url=url)
if self.wait_for_completion:
self.verify_tasks_completed(json.load(response))
return True
def get_repo_config_by_id(self, repo_id):
if repo_id not in self.repo_cache.keys():
repo_array = [x for x in self.repo_list if x['id'] == repo_id]
self.repo_cache[repo_id] = repo_array[0]
return self.repo_cache[repo_id]
def publish_repo(self, repo_id, publish_distributor):
url = "%s/pulp/api/v2/repositories/%s/actions/publish/" % (self.host, repo_id)
# If there's no distributor specified, we will publish them all
if publish_distributor is None:
repo_config = self.get_repo_config_by_id(repo_id)
for distributor in repo_config['distributors']:
data = dict()
data['id'] = distributor['id']
response, info = fetch_url(
self.module,
url,
data=json.dumps(data),
method='POST')
if info['status'] != 202:
self.module.fail_json(
msg="Failed to publish the repo.",
status_code=info['status'],
response=info['msg'],
url=url,
distributor=distributor['id'])
else:
data = dict()
data['id'] = publish_distributor
response, info = fetch_url(
self.module,
url,
data=json.dumps(data),
method='POST')
if info['status'] != 202:
self.module.fail_json(
msg="Failed to publish the repo",
status_code=info['status'],
response=info['msg'],
url=url,
distributor=publish_distributor)
if self.wait_for_completion:
self.verify_tasks_completed(json.load(response))
return True
def sync_repo(self, repo_id):
url = "%s/pulp/api/v2/repositories/%s/actions/sync/" % (self.host, repo_id)
response, info = fetch_url(self.module, url, data='', method='POST')
if info['status'] != 202:
self.module.fail_json(
msg="Failed to schedule a sync of the repo.",
status_code=info['status'],
response=info['msg'],
url=url)
if self.wait_for_completion:
self.verify_tasks_completed(json.load(response))
return True
def update_repo_distributor_config(self, repo_id, **kwargs):
url = "%s/pulp/api/v2/repositories/%s/distributors/" % (self.host, repo_id)
repo_config = self.get_repo_config_by_id(repo_id)
for distributor in repo_config['distributors']:
distributor_url = "%s%s/" % (url, distributor['id'])
data = dict()
data['distributor_config'] = dict()
for key, value in kwargs.items():
data['distributor_config'][key] = value
response, info = fetch_url(
self.module,
distributor_url,
data=json.dumps(data),
method='PUT')
if info['status'] != 202:
self.module.fail_json(
msg="Failed to set the relative url for the repository.",
status_code=info['status'],
response=info['msg'],
url=url)
def update_repo_importer_config(self, repo_id, **kwargs):
url = "%s/pulp/api/v2/repositories/%s/importers/" % (self.host, repo_id)
data = dict()
importer_config = dict()
for key, value in kwargs.items():
if value is not None:
importer_config[key] = value
data['importer_config'] = importer_config
if self.repo_type == 'rpm':
data['importer_type_id'] = "yum_importer"
response, info = fetch_url(
self.module,
url,
data=json.dumps(data),
method='POST')
if info['status'] != 202:
self.module.fail_json(
msg="Failed to set the repo importer configuration",
status_code=info['status'],
response=info['msg'],
importer_config=importer_config,
url=url)
def set_repo_list(self):
url = "%s/pulp/api/v2/repositories/?details=true" % self.host
response, info = fetch_url(self.module, url, method='GET')
if info['status'] != 200:
self.module.fail_json(
msg="Request failed",
status_code=info['status'],
response=info['msg'],
url=url)
self.repo_list = json.load(response)
def verify_tasks_completed(self, response_dict):
for task in response_dict['spawned_tasks']:
task_url = "%s%s" % (self.host, task['_href'])
while True:
response, info = fetch_url(
self.module,
task_url,
data='',
method='GET')
if info['status'] != 200:
self.module.fail_json(
msg="Failed to check async task status.",
status_code=info['status'],
response=info['msg'],
url=task_url)
task_dict = json.load(response)
if task_dict['state'] == 'finished':
return True
if task_dict['state'] == 'error':
self.module.fail_json(msg="Asynchronous task failed to complete.", error=task_dict['error'])
sleep(2)
def main():
argument_spec = url_argument_spec()
argument_spec.update(
add_export_distributor=dict(default=False, type='bool'),
feed=dict(),
importer_ssl_ca_cert=dict(),
importer_ssl_client_cert=dict(),
importer_ssl_client_key=dict(),
name=dict(required=True, aliases=['repo']),
proxy_host=dict(),
proxy_port=dict(),
publish_distributor=dict(),
pulp_host=dict(default="https://127.0.0.1"),
relative_url=dict(),
repo_type=dict(default="rpm"),
serve_http=dict(default=False, type='bool'),
serve_https=dict(default=True, type='bool'),
state=dict(
default="present",
choices=['absent', 'present', 'sync', 'publish']),
wait_for_completion=dict(default=False, type="bool"))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True)
add_export_distributor = module.params['add_export_distributor']
feed = module.params['feed']
importer_ssl_ca_cert = module.params['importer_ssl_ca_cert']
importer_ssl_client_cert = module.params['importer_ssl_client_cert']
importer_ssl_client_key = module.params['importer_ssl_client_key']
proxy_host = module.params['proxy_host']
proxy_port = module.params['proxy_port']
publish_distributor = module.params['publish_distributor']
pulp_host = module.params['pulp_host']
relative_url = module.params['relative_url']
repo = module.params['name']
repo_type = module.params['repo_type']
serve_http = module.params['serve_http']
serve_https = module.params['serve_https']
state = module.params['state']
wait_for_completion = module.params['wait_for_completion']
if (state == 'present') and (not relative_url):
module.fail_json(msg="When state is present, relative_url is required.")
# Ensure that the importer_ssl_* is the content and not a file path
if importer_ssl_ca_cert is not None:
importer_ssl_ca_cert_file_path = os.path.abspath(importer_ssl_ca_cert)
if os.path.isfile(importer_ssl_ca_cert_file_path):
importer_ssl_ca_cert_file_object = open(importer_ssl_ca_cert_file_path, 'r')
try:
importer_ssl_ca_cert = importer_ssl_ca_cert_file_object.read()
finally:
importer_ssl_ca_cert_file_object.close()
if importer_ssl_client_cert is not None:
importer_ssl_client_cert_file_path = os.path.abspath(importer_ssl_client_cert)
if os.path.isfile(importer_ssl_client_cert_file_path):
importer_ssl_client_cert_file_object = open(importer_ssl_client_cert_file_path, 'r')
try:
importer_ssl_client_cert = importer_ssl_client_cert_file_object.read()
finally:
importer_ssl_client_cert_file_object.close()
if importer_ssl_client_key is not None:
importer_ssl_client_key_file_path = os.path.abspath(importer_ssl_client_key)
if os.path.isfile(importer_ssl_client_key_file_path):
importer_ssl_client_key_file_object = open(importer_ssl_client_key_file_path, 'r')
try:
importer_ssl_client_key = importer_ssl_client_key_file_object.read()
finally:
importer_ssl_client_key_file_object.close()
server = pulp_server(module, pulp_host, repo_type, wait_for_completion=wait_for_completion)
server.set_repo_list()
repo_exists = server.check_repo_exists(repo)
changed = False
if state == 'absent' and repo_exists:
if not module.check_mode:
server.delete_repo(repo)
changed = True
if state == 'sync':
if not repo_exists:
module.fail_json(msg="Repository was not found. The repository can not be synced.")
if not module.check_mode:
server.sync_repo(repo)
changed = True
if state == 'publish':
if not repo_exists:
module.fail_json(msg="Repository was not found. The repository can not be published.")
if not module.check_mode:
server.publish_repo(repo, publish_distributor)
changed = True
if state == 'present':
if not repo_exists:
if not module.check_mode:
server.create_repo(
repo_id=repo,
relative_url=relative_url,
feed=feed,
serve_http=serve_http,
serve_https=serve_https,
proxy_host=proxy_host,
proxy_port=proxy_port,
ssl_ca_cert=importer_ssl_ca_cert,
ssl_client_cert=importer_ssl_client_cert,
ssl_client_key=importer_ssl_client_key,
add_export_distributor=add_export_distributor)
changed = True
else:
# Check to make sure all the settings are correct
# The importer config gets overwritten on set and not updated, so
# we set the whole config at the same time.
if not server.compare_repo_importer_config(
repo,
feed=feed,
proxy_host=proxy_host,
proxy_port=proxy_port,
ssl_ca_cert=importer_ssl_ca_cert,
ssl_client_cert=importer_ssl_client_cert,
ssl_client_key=importer_ssl_client_key
):
if not module.check_mode:
server.update_repo_importer_config(
repo,
feed=feed,
proxy_host=proxy_host,
proxy_port=proxy_port,
ssl_ca_cert=importer_ssl_ca_cert,
ssl_client_cert=importer_ssl_client_cert,
ssl_client_key=importer_ssl_client_key)
changed = True
if relative_url is not None:
if not server.compare_repo_distributor_config(
repo,
relative_url=relative_url
):
if not module.check_mode:
server.update_repo_distributor_config(
repo,
relative_url=relative_url)
changed = True
if not server.compare_repo_distributor_config(repo, http=serve_http):
if not module.check_mode:
server.update_repo_distributor_config(repo, http=serve_http)
changed = True
if not server.compare_repo_distributor_config(repo, https=serve_https):
if not module.check_mode:
server.update_repo_distributor_config(repo, https=serve_https)
changed = True
module.exit_json(changed=changed, repo=repo)
if __name__ == '__main__':
main()
|
akampjes/p0rk-crackling
|
refs/heads/master
|
p0rk/porkweb/urls.py
|
1
|
from django.conf.urls import patterns, include, url
urlpatterns = patterns('porkweb.views',
url(r'^job/(?P<jobid>\d+)/$', 'front'),
url(r"^$", "front"),
)
|
OCA/bank-statement-reconcile
|
refs/heads/11.0
|
account_reconcile_payment_order/__init__.py
|
83
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This module copyright (C) 2015 Therp BV <http://therp.nl>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import models
|
decvalts/iris
|
refs/heads/master
|
lib/iris/tests/unit/analysis/geometry/__init__.py
|
17
|
# (C) British Crown Copyright 2014 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the :mod:`iris.analysis.geometry` module."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
|
tmuelle2/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/models/test_input.py
|
125
|
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class TestInput(object):
"""Groups information about a test for easy passing of data."""
def __init__(self, test_name, timeout=None, requires_lock=None, reference_files=None, should_run_pixel_tests=None):
# TestInput objects are normally constructed by the manager and passed
        # to the workers, but some fields are set lazily in the workers where
        # possible because they require us to look at the filesystem and we
        # want to be able to do that in parallel.
self.test_name = test_name
self.timeout = timeout # in msecs; should rename this for consistency
self.requires_lock = requires_lock
self.reference_files = reference_files
self.should_run_pixel_tests = should_run_pixel_tests
def __repr__(self):
return "TestInput('%s', timeout=%s, requires_lock=%s, reference_files=%s, should_run_pixel_tests=%s)" % (self.test_name, self.timeout, self.requires_lock, self.reference_files, self.should_run_pixel_tests)
|
mitar/django
|
refs/heads/master
|
django/template/loaders/app_directories.py
|
5
|
"""
Wrapper for loading templates from "templates" directories in INSTALLED_APPS
packages.
"""
import os
import sys
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.template.base import TemplateDoesNotExist
from django.template.loader import BaseLoader
from django.utils._os import safe_join
from django.utils.importlib import import_module
# At compile time, cache the directories to search.
fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
app_template_dirs = []
for app in settings.INSTALLED_APPS:
try:
mod = import_module(app)
except ImportError as e:
raise ImproperlyConfigured('ImportError %s: %s' % (app, e.args[0]))
template_dir = os.path.join(os.path.dirname(mod.__file__), 'templates')
if os.path.isdir(template_dir):
app_template_dirs.append(template_dir.decode(fs_encoding))
# It won't change, so convert it to a tuple to save memory.
app_template_dirs = tuple(app_template_dirs)
class Loader(BaseLoader):
is_usable = True
def get_template_sources(self, template_name, template_dirs=None):
"""
Returns the absolute paths to "template_name", when appended to each
directory in "template_dirs". Any paths that don't lie inside one of the
template dirs are excluded from the result set, for security reasons.
"""
if not template_dirs:
template_dirs = app_template_dirs
for template_dir in template_dirs:
try:
yield safe_join(template_dir, template_name)
except UnicodeDecodeError:
# The template dir name was a bytestring that wasn't valid UTF-8.
raise
except ValueError:
# The joined path was located outside of template_dir.
pass
def load_template_source(self, template_name, template_dirs=None):
for filepath in self.get_template_sources(template_name, template_dirs):
try:
with open(filepath) as fp:
return (fp.read().decode(settings.FILE_CHARSET), filepath)
except IOError:
pass
raise TemplateDoesNotExist(template_name)
_loader = Loader()
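# Hedged usage sketch (kept as a comment: module-level execution would require
# configured settings, and the template name is illustrative):
#
#   loader = Loader()
#   for path in loader.get_template_sources('admin/base.html'):
#       print path  # absolute candidate inside some app's templates/ dir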
|
cuboxi/android_external_chromium_org
|
refs/heads/kitkat
|
ppapi/native_client/tools/browser_tester/browser_tester.py
|
69
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import optparse
import os.path
import socket
import sys
import thread
import time
import urllib
# Allow the import of third party modules
script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(script_dir, '../../../../third_party/'))
sys.path.insert(0, os.path.join(script_dir, '../../../../tools/valgrind/'))
sys.path.insert(0, os.path.join(script_dir, '../../../../testing/'))
import browsertester.browserlauncher
import browsertester.rpclistener
import browsertester.server
import memcheck_analyze
import tsan_analyze
import test_env
def BuildArgParser():
usage = 'usage: %prog [options]'
parser = optparse.OptionParser(usage)
parser.add_option('-p', '--port', dest='port', action='store', type='int',
default='0', help='The TCP port the server will bind to. '
'The default is to pick an unused port number.')
parser.add_option('--browser_path', dest='browser_path', action='store',
type='string', default=None,
help='Use the browser located here.')
parser.add_option('--map_file', dest='map_files', action='append',
type='string', nargs=2, default=[],
metavar='DEST SRC',
help='Add file SRC to be served from the HTTP server, '
'to be made visible under the path DEST.')
parser.add_option('--serving_dir', dest='serving_dirs', action='append',
type='string', default=[],
metavar='DIRNAME',
help='Add directory DIRNAME to be served from the HTTP '
'server to be made visible under the root.')
parser.add_option('--output_dir', dest='output_dir', action='store',
type='string', default=None,
metavar='DIRNAME',
help='Set directory DIRNAME to be the output directory '
'when POSTing data to the server. NOTE: if this flag is '
'not set, POSTs will fail.')
parser.add_option('--test_arg', dest='test_args', action='append',
type='string', nargs=2, default=[],
metavar='KEY VALUE',
help='Parameterize the test with a key/value pair.')
parser.add_option('--redirect_url', dest='map_redirects', action='append',
type='string', nargs=2, default=[],
metavar='DEST SRC',
help='Add a redirect to the HTTP server, '
'requests for SRC will result in a redirect (302) to DEST.')
parser.add_option('-f', '--file', dest='files', action='append',
type='string', default=[],
metavar='FILENAME',
help='Add a file to serve from the HTTP server, to be '
'made visible in the root directory. '
'"--file path/to/foo.html" is equivalent to '
'"--map_file foo.html path/to/foo.html"')
parser.add_option('--mime_type', dest='mime_types', action='append',
type='string', nargs=2, default=[], metavar='DEST SRC',
help='Map file extension SRC to MIME type DEST when '
'serving it from the HTTP server.')
parser.add_option('-u', '--url', dest='url', action='store',
type='string', default=None,
help='The webpage to load.')
parser.add_option('--ppapi_plugin', dest='ppapi_plugin', action='store',
type='string', default=None,
help='Use the browser plugin located here.')
parser.add_option('--ppapi_plugin_mimetype', dest='ppapi_plugin_mimetype',
action='store', type='string', default='application/x-nacl',
help='Associate this mimetype with the browser plugin. '
'Unused if --ppapi_plugin is not specified.')
parser.add_option('--sel_ldr', dest='sel_ldr', action='store',
type='string', default=None,
help='Use the sel_ldr located here.')
parser.add_option('--sel_ldr_bootstrap', dest='sel_ldr_bootstrap',
action='store', type='string', default=None,
help='Use the bootstrap loader located here.')
parser.add_option('--irt_library', dest='irt_library', action='store',
type='string', default=None,
help='Use the integrated runtime (IRT) library '
'located here.')
parser.add_option('--interactive', dest='interactive', action='store_true',
default=False, help='Do not quit after testing is done. '
'Handy for iterative development. Disables timeout.')
parser.add_option('--debug', dest='debug', action='store_true', default=False,
help='Request debugging output from browser.')
parser.add_option('--timeout', dest='timeout', action='store', type='float',
default=5.0,
help='The maximum amount of time to wait, in seconds, for '
'the browser to make a request. The timer resets with each '
'request.')
parser.add_option('--hard_timeout', dest='hard_timeout', action='store',
type='float', default=None,
help='The maximum amount of time to wait, in seconds, for '
'the entire test. This will kill runaway tests. ')
parser.add_option('--allow_404', dest='allow_404', action='store_true',
default=False,
help='Allow 404s to occur without failing the test.')
parser.add_option('-b', '--bandwidth', dest='bandwidth', action='store',
type='float', default='0.0',
help='The amount of bandwidth (megabits / second) to '
'simulate between the client and the server. This used for '
'replies with file payloads. All other responses are '
'assumed to be short. Bandwidth values <= 0.0 are assumed '
'to mean infinite bandwidth.')
parser.add_option('--extension', dest='browser_extensions', action='append',
type='string', default=[],
help='Load the browser extensions located at the list of '
'paths. Note: this currently only works with the Chrome '
'browser.')
parser.add_option('--tool', dest='tool', action='store',
type='string', default=None,
help='Run tests under a tool.')
parser.add_option('--browser_flag', dest='browser_flags', action='append',
type='string', default=[],
help='Additional flags for the chrome command.')
parser.add_option('--enable_ppapi_dev', dest='enable_ppapi_dev',
action='store', type='int', default=1,
help='Enable/disable PPAPI Dev interfaces while testing.')
parser.add_option('--nacl_exe_stdin', dest='nacl_exe_stdin',
type='string', default=None,
help='Redirect standard input of NaCl executable.')
parser.add_option('--nacl_exe_stdout', dest='nacl_exe_stdout',
type='string', default=None,
help='Redirect standard output of NaCl executable.')
parser.add_option('--nacl_exe_stderr', dest='nacl_exe_stderr',
type='string', default=None,
help='Redirect standard error of NaCl executable.')
parser.add_option('--expect_browser_process_crash',
dest='expect_browser_process_crash',
action='store_true',
help='Do not signal a failure if the browser process '
'crashes')
parser.add_option('--enable_crash_reporter', dest='enable_crash_reporter',
action='store_true', default=False,
help='Force crash reporting on.')
parser.add_option('--enable_sockets', dest='enable_sockets',
action='store_true', default=False,
help='Pass --allow-nacl-socket-api=<host> to Chrome, where '
'<host> is the name of the browser tester\'s web server.')
return parser
def ProcessToolLogs(options, logs_dir):
if options.tool == 'memcheck':
analyzer = memcheck_analyze.MemcheckAnalyzer('', use_gdb=True)
logs_wildcard = 'xml.*'
elif options.tool == 'tsan':
analyzer = tsan_analyze.TsanAnalyzer(use_gdb=True)
logs_wildcard = 'log.*'
files = glob.glob(os.path.join(logs_dir, logs_wildcard))
retcode = analyzer.Report(files, options.url)
return retcode
# An exception that indicates a possible flake.
class RetryTest(Exception):
pass
def DumpNetLog(netlog):
sys.stdout.write('\n')
if not os.path.isfile(netlog):
sys.stdout.write('Cannot find netlog, did Chrome actually launch?\n')
else:
sys.stdout.write('Netlog exists (%d bytes).\n' % os.path.getsize(netlog))
sys.stdout.write('Dumping it to stdout.\n\n\n')
sys.stdout.write(open(netlog).read())
sys.stdout.write('\n\n\n')
# Try to discover the real IP address of this machine. If we can't figure it
# out, fall back to localhost.
# A windows bug makes using the loopback interface flaky in rare cases.
# http://code.google.com/p/chromium/issues/detail?id=114369
def GetHostName():
host = 'localhost'
try:
host = socket.gethostbyname(socket.gethostname())
except Exception:
pass
if host == '0.0.0.0':
host = 'localhost'
return host
def RunTestsOnce(url, options):
# Set the default here so we're assured hard_timeout will be defined.
  # Tests, such as run_inbrowser_trusted_crash_in_startup_test, may not use the
  # RunFromCommandLine entry point - and otherwise get stuck in an infinite
  # loop when something goes wrong and the hard timeout is not set.
# http://code.google.com/p/chromium/issues/detail?id=105406
if options.hard_timeout is None:
options.hard_timeout = options.timeout * 4
options.files.append(os.path.join(script_dir, 'browserdata', 'nacltest.js'))
# Setup the environment with the setuid sandbox path.
test_env.enable_sandbox_if_required(os.environ)
# Create server
host = GetHostName()
try:
server = browsertester.server.Create(host, options.port)
except Exception:
sys.stdout.write('Could not bind %r, falling back to localhost.\n' % host)
server = browsertester.server.Create('localhost', options.port)
# If port 0 has been requested, an arbitrary port will be bound so we need to
  # query it. Older versions of Python do not set server_address correctly when
  # the requested port is 0, so we need to break encapsulation and query the
# socket directly.
host, port = server.socket.getsockname()
file_mapping = dict(options.map_files)
for filename in options.files:
file_mapping[os.path.basename(filename)] = filename
for server_path, real_path in file_mapping.iteritems():
if not os.path.exists(real_path):
raise AssertionError('\'%s\' does not exist.' % real_path)
mime_types = {}
for ext, mime_type in options.mime_types:
mime_types['.' + ext] = mime_type
def ShutdownCallback():
server.TestingEnded()
close_browser = options.tool is not None and not options.interactive
return close_browser
listener = browsertester.rpclistener.RPCListener(ShutdownCallback)
server.Configure(file_mapping,
dict(options.map_redirects),
mime_types,
options.allow_404,
options.bandwidth,
listener,
options.serving_dirs,
options.output_dir)
browser = browsertester.browserlauncher.ChromeLauncher(options)
full_url = 'http://%s:%d/%s' % (host, port, url)
if len(options.test_args) > 0:
full_url += '?' + urllib.urlencode(options.test_args)
browser.Run(full_url, host, port)
server.TestingBegun(0.125)
# In Python 2.5, server.handle_request may block indefinitely. Serving pages
# is done in its own thread so the main thread can time out as needed.
def Serve():
while server.test_in_progress or options.interactive:
server.handle_request()
thread.start_new_thread(Serve, ())
tool_failed = False
time_started = time.time()
def HardTimeout(total_time):
return total_time >= 0.0 and time.time() - time_started >= total_time
try:
while server.test_in_progress or options.interactive:
if not browser.IsRunning():
if options.expect_browser_process_crash:
break
listener.ServerError('Browser process ended during test '
'(return code %r)' % browser.GetReturnCode())
# If Chrome exits prematurely without making a single request to the
        # web server, this is probably a Chrome crash-on-launch bug not related
# to the test at hand. Retry, unless we're in interactive mode. In
# interactive mode the user may manually close the browser, so don't
        # retry (it would just be annoying).
if not server.received_request and not options.interactive:
raise RetryTest('Chrome failed to launch.')
else:
break
elif not options.interactive and server.TimedOut(options.timeout):
js_time = server.TimeSinceJSHeartbeat()
err = 'Did not hear from the test for %.1f seconds.' % options.timeout
err += '\nHeard from Javascript %.1f seconds ago.' % js_time
if js_time > 2.0:
err += '\nThe renderer probably hung or crashed.'
else:
err += '\nThe test probably did not get a callback that it expected.'
listener.ServerError(err)
if not server.received_request:
raise RetryTest('Chrome hung before running the test.')
break
elif not options.interactive and HardTimeout(options.hard_timeout):
listener.ServerError('The test took over %.1f seconds. This is '
'probably a runaway test.' % options.hard_timeout)
break
else:
# If Python 2.5 support is dropped, stick server.handle_request() here.
time.sleep(0.125)
if options.tool:
sys.stdout.write('##################### Waiting for the tool to exit\n')
browser.WaitForProcessDeath()
sys.stdout.write('##################### Processing tool logs\n')
tool_failed = ProcessToolLogs(options, browser.tool_log_dir)
finally:
try:
if listener.ever_failed and not options.interactive:
if not server.received_request:
sys.stdout.write('\nNo URLs were served by the test runner. It is '
'unlikely this test failure has anything to do with '
'this particular test.\n')
DumpNetLog(browser.NetLogName())
except Exception:
listener.ever_failed = 1
# Try to let the browser clean itself up normally before killing it.
sys.stdout.write('##################### Terminating the browser\n')
browser.WaitForProcessDeath()
if browser.IsRunning():
sys.stdout.write('##################### TERM failed, KILLING\n')
# Always call Cleanup; it kills the process, but also removes the
# user-data-dir.
browser.Cleanup()
# We avoid calling server.server_close() here because it causes
# the HTTP server thread to exit uncleanly with an EBADF error,
# which adds noise to the logs (though it does not cause the test
# to fail). server_close() does not attempt to tell the server
# loop to shut down before closing the socket FD it is
# select()ing. Since we are about to exit, we don't really need
# to close the socket FD.
if tool_failed:
return 2
elif listener.ever_failed:
return 1
else:
return 0
# This is an entrypoint for tests that treat the browser tester as a Python
# library rather than an opaque script.
# (e.g. run_inbrowser_trusted_crash_in_startup_test)
def Run(url, options):
result = 1
attempt = 1
while True:
try:
result = RunTestsOnce(url, options)
if result:
# Currently (2013/11/15) nacl_integration is fairly flaky and there is
# not enough time to look into it. Retry if the test fails for any
# reason. Note that in general this test runner tries to only retry
# when a known flake is encountered. (See the other raise
# RetryTest(..)s in this file.) This blanket retry means that those
# other cases could be removed without changing the behavior of the test
# runner, but it is hoped that this blanket retry will eventually be
# unnecessary and subsequently removed. The more precise retries have
# been left in place to preserve the knowledge.
raise RetryTest('HACK retrying failed test.')
break
except RetryTest:
# Only retry once.
if attempt < 2:
sys.stdout.write('\n@@@STEP_WARNINGS@@@\n')
sys.stdout.write('WARNING: suspected flake, retrying test!\n\n')
attempt += 1
continue
else:
sys.stdout.write('\nWARNING: failed too many times, not retrying.\n\n')
result = 1
break
return result
def RunFromCommandLine():
parser = BuildArgParser()
options, args = parser.parse_args()
if len(args) != 0:
print args
parser.error('Invalid arguments')
# Validate the URL
url = options.url
if url is None:
parser.error('Must specify a URL')
return Run(url, options)
if __name__ == '__main__':
sys.exit(RunFromCommandLine())
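# Example invocation (every flag below is defined in BuildArgParser above;
# the paths are illustrative):
#
#   python browser_tester.py --url test.html \
#       --serving_dir path/to/pages --browser_path /usr/bin/chrome \
#       --timeout 10 --hard_timeout 60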
|
krousey/test-infra
|
refs/heads/master
|
gubernator/third_party/defusedxml/ElementTree.py
|
53
|
# defusedxml
#
# Copyright (c) 2013 by Christian Heimes <christian@python.org>
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
"""Defused xml.etree.ElementTree facade
"""
from __future__ import print_function, absolute_import
import sys
from .common import PY3, PY26, PY31
if PY3:
import importlib
else:
from xml.etree.ElementTree import XMLParser as _XMLParser
from xml.etree.ElementTree import iterparse as _iterparse
if PY26:
from xml.parsers.expat import ExpatError as ParseError
else:
from xml.etree.ElementTree import ParseError
_IterParseIterator = None
from xml.etree.ElementTree import TreeBuilder as _TreeBuilder
from xml.etree.ElementTree import parse as _parse
from xml.etree.ElementTree import tostring
from .common import (DTDForbidden, EntitiesForbidden,
ExternalReferenceForbidden, _generate_etree_functions)
__origin__ = "xml.etree.ElementTree"
def _get_py3_cls():
"""Python 3.3 hides the pure Python code but defusedxml requires it.
The code is based on test.support.import_fresh_module().
"""
pymodname = "xml.etree.ElementTree"
cmodname = "_elementtree"
pymod = sys.modules.pop(pymodname, None)
cmod = sys.modules.pop(cmodname, None)
sys.modules[cmodname] = None
pure_pymod = importlib.import_module(pymodname)
if cmod is not None:
sys.modules[cmodname] = cmod
else:
sys.modules.pop(cmodname)
sys.modules[pymodname] = pymod
_XMLParser = pure_pymod.XMLParser
_iterparse = pure_pymod.iterparse
if PY31:
_IterParseIterator = None
from xml.parsers.expat import ExpatError as ParseError
else:
_IterParseIterator = pure_pymod._IterParseIterator
ParseError = pure_pymod.ParseError
return _XMLParser, _iterparse, _IterParseIterator, ParseError
if PY3:
_XMLParser, _iterparse, _IterParseIterator, ParseError = _get_py3_cls()
class DefusedXMLParser(_XMLParser):
def __init__(self, html=0, target=None, encoding=None,
forbid_dtd=False, forbid_entities=True,
forbid_external=True):
if PY26 or PY31:
_XMLParser.__init__(self, html, target)
else:
# Python 2.x old style class
_XMLParser.__init__(self, html, target, encoding)
self.forbid_dtd = forbid_dtd
self.forbid_entities = forbid_entities
self.forbid_external = forbid_external
if PY3 and not PY31:
parser = self.parser
else:
parser = self._parser
if self.forbid_dtd:
parser.StartDoctypeDeclHandler = self.defused_start_doctype_decl
if self.forbid_entities:
parser.EntityDeclHandler = self.defused_entity_decl
parser.UnparsedEntityDeclHandler = self.defused_unparsed_entity_decl
if self.forbid_external:
parser.ExternalEntityRefHandler = self.defused_external_entity_ref_handler
def defused_start_doctype_decl(self, name, sysid, pubid,
has_internal_subset):
raise DTDForbidden(name, sysid, pubid)
def defused_entity_decl(self, name, is_parameter_entity, value, base,
sysid, pubid, notation_name):
raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name)
def defused_unparsed_entity_decl(self, name, base, sysid, pubid,
notation_name):
# expat 1.2
raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name)
def defused_external_entity_ref_handler(self, context, base, sysid,
pubid):
raise ExternalReferenceForbidden(context, base, sysid, pubid)
# aliases
XMLTreeBuilder = XMLParse = DefusedXMLParser
parse, iterparse, fromstring = _generate_etree_functions(DefusedXMLParser,
_TreeBuilder, _IterParseIterator, _parse, _iterparse)
XML = fromstring
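if __name__ == '__main__':
    # Hedged demo of the hardening this facade provides: a document declaring
    # an internal entity is rejected instead of expanded.
    _payload = '<!DOCTYPE demo [<!ENTITY e "boom">]><demo>&e;</demo>'
    try:
        fromstring(_payload)
    except (DTDForbidden, EntitiesForbidden) as exc:
        print('blocked as expected: %r' % (exc,))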
|
ZazieTheBeast/oscar
|
refs/heads/master
|
tests/unit/payment/model_tests.py
|
49
|
import datetime
from decimal import Decimal as D
from django.test import TestCase
from oscar.core.compat import get_user_model
from oscar.apps.payment.models import Bankcard, Source
class TestBankcard(TestCase):
def test_obfuscates_number_before_saving(self):
bankcard = Bankcard(number="1000011100000004")
bankcard.prepare_for_save()
self.assertEqual("XXXX-XXXX-XXXX-0004", bankcard.number)
def test_determines_bankcard_type(self):
bankcard = Bankcard(number="5500000000000004")
self.assertEqual("Mastercard", bankcard.card_type)
def test_provides_start_month_property(self):
start = datetime.date(day=1, month=1, year=2010)
bankcard = Bankcard(start_date=start)
self.assertEqual("01/10", bankcard.start_month())
def test_provides_end_month_property(self):
end = datetime.date(day=1, month=1, year=2010)
bankcard = Bankcard(expiry_date=end)
self.assertEqual("01/10", bankcard.expiry_month())
def test_bankcard_card_correct_save(self):
# issue #1486
user_klass = get_user_model()
user = user_klass.objects.create_user('_', 'a@a.com', 'pwd')
end = datetime.date(day=1, month=1, year=2010)
bankcard = Bankcard.objects.create(
user=user, number="5500000000000004", expiry_date=end)
saved_bankcard = Bankcard.objects.get(id=bankcard.id)
self.assertEqual('Mastercard', saved_bankcard.card_type)
class TestSource(TestCase):
def test_calculates_initial_balance_correctly(self):
source = Source(amount_allocated=D('100'))
self.assertEqual(D('100'), source.balance)
def test_calculates_balance_correctly(self):
source = Source(
amount_allocated=D('100'),
amount_debited=D('80'),
amount_refunded=D('20'))
self.assertEqual(
D('100') - D('80') + D('20'), source.balance)
def test_calculates_amount_for_refund_correctly(self):
source = Source(
amount_allocated=D('100'),
amount_debited=D('80'),
amount_refunded=D('20'))
self.assertEqual(
D('80') - D('20'), source.amount_available_for_refund)
|
kvar/ansible
|
refs/heads/seas_master_2.9.5
|
lib/ansible/galaxy/token.py
|
9
|
########################################################################
#
# (C) 2015, Chris Houseknecht <chouse@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import os
import json
from stat import S_IRUSR, S_IWUSR
import yaml
from ansible import constants as C
from ansible.galaxy.user_agent import user_agent
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.urls import open_url
from ansible.utils.display import Display
display = Display()
class NoTokenSentinel(object):
""" Represents an ansible.cfg server with not token defined (will ignore cmdline and GALAXY_TOKEN_PATH. """
def __new__(cls, *args, **kwargs):
return cls
class KeycloakToken(object):
    '''A token granted by a Keycloak server,
    e.g. sso.redhat.com as used by cloud.redhat.com
    (i.e. Automation Hub).'''
token_type = 'Bearer'
def __init__(self, access_token=None, auth_url=None, validate_certs=True):
self.access_token = access_token
self.auth_url = auth_url
self._token = None
self.validate_certs = validate_certs
def _form_payload(self):
return 'grant_type=refresh_token&client_id=cloud-services&refresh_token=%s' % self.access_token
def get(self):
if self._token:
return self._token
# - build a request to POST to auth_url
# - body is form encoded
# - 'request_token' is the offline token stored in ansible.cfg
# - 'grant_type' is 'refresh_token'
# - 'client_id' is 'cloud-services'
# - should probably be based on the contents of the
# offline_ticket's JWT payload 'aud' (audience)
# or 'azp' (Authorized party - the party to which the ID Token was issued)
payload = self._form_payload()
resp = open_url(to_native(self.auth_url),
data=payload,
validate_certs=self.validate_certs,
method='POST',
http_agent=user_agent())
# TODO: handle auth errors
data = json.loads(to_text(resp.read(), errors='surrogate_or_strict'))
# - extract 'access_token'
self._token = data.get('access_token')
return self._token
def headers(self):
headers = {}
headers['Authorization'] = '%s %s' % (self.token_type, self.get())
return headers
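# Hedged usage sketch for KeycloakToken (no request is made here; the URL is
# illustrative, and the offline token comes from ansible.cfg):
#
#   token = KeycloakToken(access_token='<offline token>',
#                         auth_url='https://sso.example.com/token')
#   token.headers()  # POSTs the refresh_token grant once, then returns
#                    # {'Authorization': 'Bearer <access_token>'}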
class GalaxyToken(object):
    ''' Class for storing and retrieving the local galaxy token '''
token_type = 'Token'
def __init__(self, token=None):
self.b_file = to_bytes(C.GALAXY_TOKEN_PATH, errors='surrogate_or_strict')
# Done so the config file is only opened when set/get/save is called
self._config = None
self._token = token
@property
def config(self):
if not self._config:
self._config = self._read()
# Prioritise the token passed into the constructor
if self._token:
self._config['token'] = None if self._token is NoTokenSentinel else self._token
return self._config
def _read(self):
action = 'Opened'
if not os.path.isfile(self.b_file):
            # token file not found; create it and chmod u+rw
open(self.b_file, 'w').close()
os.chmod(self.b_file, S_IRUSR | S_IWUSR) # owner has +rw
action = 'Created'
with open(self.b_file, 'r') as f:
config = yaml.safe_load(f)
display.vvv('%s %s' % (action, to_text(self.b_file)))
return config or {}
def set(self, token):
self._token = token
self.save()
def get(self):
return self.config.get('token', None)
def save(self):
with open(self.b_file, 'w') as f:
yaml.safe_dump(self.config, f, default_flow_style=False)
def headers(self):
headers = {}
token = self.get()
if token:
            headers['Authorization'] = '%s %s' % (self.token_type, token)
return headers
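# Hedged usage sketch for GalaxyToken (left as a comment because it reads and
# writes the file at C.GALAXY_TOKEN_PATH):
#
#   gt = GalaxyToken()
#   gt.set('abc123')   # persists {'token': 'abc123'} to the token file
#   gt.headers()       # -> {'Authorization': 'Token abc123'}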
class BasicAuthToken(object):
token_type = 'Basic'
def __init__(self, username, password=None):
self.username = username
self.password = password
self._token = None
@staticmethod
def _encode_token(username, password):
token = "%s:%s" % (to_text(username, errors='surrogate_or_strict'),
to_text(password, errors='surrogate_or_strict', nonstring='passthru') or '')
b64_val = base64.b64encode(to_bytes(token, encoding='utf-8', errors='surrogate_or_strict'))
return to_text(b64_val)
def get(self):
if self._token:
return self._token
self._token = self._encode_token(self.username, self.password)
return self._token
def headers(self):
headers = {}
headers['Authorization'] = '%s %s' % (self.token_type, self.get())
return headers
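# BasicAuthToken is pure computation, so a concrete (hedged) check is easy:
# base64(b'user:pass') is 'dXNlcjpwYXNz', hence:
#
#   BasicAuthToken('user', 'pass').headers()
#   # -> {'Authorization': 'Basic dXNlcjpwYXNz'}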
|
ff94315/hiwifi-openwrt-HC5661-HC5761
|
refs/heads/master
|
staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/encodings/cp857.py
|
593
|
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP857.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp857',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x0131, # LATIN SMALL LETTER DOTLESS I
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x0130, # LATIN CAPITAL LETTER I WITH DOT ABOVE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x009f: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x011e, # LATIN CAPITAL LETTER G WITH BREVE
0x00a7: 0x011f, # LATIN SMALL LETTER G WITH BREVE
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x00ae, # REGISTERED SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00b7: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x00b8: 0x00a9, # COPYRIGHT SIGN
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x00a2, # CENT SIGN
0x00be: 0x00a5, # YEN SIGN
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
0x00c7: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00d1: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00d2: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00d4: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x00d5: None, # UNDEFINED
0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d8: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x00a6, # BROKEN BAR
0x00de: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00e3: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: None, # UNDEFINED
0x00e8: 0x00d7, # MULTIPLICATION SIGN
0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ea: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00eb: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x00ed: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x00ee: 0x00af, # MACRON
0x00ef: 0x00b4, # ACUTE ACCENT
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: None, # UNDEFINED
0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00f4: 0x00b6, # PILCROW SIGN
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x00b8, # CEDILLA
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x00b9, # SUPERSCRIPT ONE
0x00fc: 0x00b3, # SUPERSCRIPT THREE
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\u0131' # 0x008d -> LATIN SMALL LETTER DOTLESS I
u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
u'\u0130' # 0x0098 -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
u'\xa3' # 0x009c -> POUND SIGN
u'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
u'\u015e' # 0x009e -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\u015f' # 0x009f -> LATIN SMALL LETTER S WITH CEDILLA
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
u'\u011e' # 0x00a6 -> LATIN CAPITAL LETTER G WITH BREVE
u'\u011f' # 0x00a7 -> LATIN SMALL LETTER G WITH BREVE
u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
u'\xae' # 0x00a9 -> REGISTERED SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc0' # 0x00b7 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xa9' # 0x00b8 -> COPYRIGHT SIGN
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\xa2' # 0x00bd -> CENT SIGN
u'\xa5' # 0x00be -> YEN SIGN
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\xe3' # 0x00c6 -> LATIN SMALL LETTER A WITH TILDE
u'\xc3' # 0x00c7 -> LATIN CAPITAL LETTER A WITH TILDE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa4' # 0x00cf -> CURRENCY SIGN
u'\xba' # 0x00d0 -> MASCULINE ORDINAL INDICATOR
u'\xaa' # 0x00d1 -> FEMININE ORDINAL INDICATOR
u'\xca' # 0x00d2 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x00d4 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\ufffe' # 0x00d5 -> UNDEFINED
u'\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x00d8 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\xa6' # 0x00dd -> BROKEN BAR
u'\xcc' # 0x00de -> LATIN CAPITAL LETTER I WITH GRAVE
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd2' # 0x00e3 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
u'\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\ufffe' # 0x00e7 -> UNDEFINED
u'\xd7' # 0x00e8 -> MULTIPLICATION SIGN
u'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0x00ea -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0x00eb -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xec' # 0x00ec -> LATIN SMALL LETTER I WITH GRAVE
u'\xff' # 0x00ed -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\xaf' # 0x00ee -> MACRON
u'\xb4' # 0x00ef -> ACUTE ACCENT
u'\xad' # 0x00f0 -> SOFT HYPHEN
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\ufffe' # 0x00f2 -> UNDEFINED
u'\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
u'\xb6' # 0x00f4 -> PILCROW SIGN
u'\xa7' # 0x00f5 -> SECTION SIGN
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\xb8' # 0x00f7 -> CEDILLA
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\xa8' # 0x00f9 -> DIAERESIS
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\xb9' # 0x00fb -> SUPERSCRIPT ONE
u'\xb3' # 0x00fc -> SUPERSCRIPT THREE
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a2: 0x00bd, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a5: 0x00be, # YEN SIGN
0x00a6: 0x00dd, # BROKEN BAR
0x00a7: 0x00f5, # SECTION SIGN
0x00a8: 0x00f9, # DIAERESIS
0x00a9: 0x00b8, # COPYRIGHT SIGN
0x00aa: 0x00d1, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00ae: 0x00a9, # REGISTERED SIGN
0x00af: 0x00ee, # MACRON
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b3: 0x00fc, # SUPERSCRIPT THREE
0x00b4: 0x00ef, # ACUTE ACCENT
0x00b5: 0x00e6, # MICRO SIGN
0x00b6: 0x00f4, # PILCROW SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00b8: 0x00f7, # CEDILLA
0x00b9: 0x00fb, # SUPERSCRIPT ONE
0x00ba: 0x00d0, # MASCULINE ORDINAL INDICATOR
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c0: 0x00b7, # LATIN CAPITAL LETTER A WITH GRAVE
0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c3: 0x00c7, # LATIN CAPITAL LETTER A WITH TILDE
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c8: 0x00d4, # LATIN CAPITAL LETTER E WITH GRAVE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00ca: 0x00d2, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00cc: 0x00de, # LATIN CAPITAL LETTER I WITH GRAVE
0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00cf: 0x00d8, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d2: 0x00e3, # LATIN CAPITAL LETTER O WITH GRAVE
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x00e8, # MULTIPLICATION SIGN
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00d9: 0x00eb, # LATIN CAPITAL LETTER U WITH GRAVE
0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
0x00db: 0x00ea, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e3: 0x00c6, # LATIN SMALL LETTER A WITH TILDE
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ec: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00ff: 0x00ed, # LATIN SMALL LETTER Y WITH DIAERESIS
0x011e: 0x00a6, # LATIN CAPITAL LETTER G WITH BREVE
0x011f: 0x00a7, # LATIN SMALL LETTER G WITH BREVE
0x0130: 0x0098, # LATIN CAPITAL LETTER I WITH DOT ABOVE
0x0131: 0x008d, # LATIN SMALL LETTER DOTLESS I
0x015e: 0x009e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x015f: 0x009f, # LATIN SMALL LETTER S WITH CEDILLA
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
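# Hedged identification: the mappings for G WITH BREVE (0x00a6/0x00a7),
# DOTLESS I (0x008d), I WITH DOT ABOVE (0x0098) and S WITH CEDILLA
# (0x009e/0x009f) are characteristic of code page 857 (DOS Turkish), so this
# appears to be an encoding map from Unicode code points to cp857 bytes.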
|
chenokay/ripozo
|
refs/heads/master
|
ripozo/adapters/base.py
|
2
|
"""
Module containing the base adapter.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from abc import ABCMeta, abstractproperty
from ripozo.utilities import join_url_parts
import json
import six
@six.add_metaclass(ABCMeta)
class AdapterBase(object):
"""
The adapter base is responsible for specifying how
a resource should be translated for the client. For
example, you may want to specify a specific hypermedia
protocol or format it in a manner that is specific to
your client (though you should probably avoid that)
:param list formats: A list of strings that indicate which Content-Types
will match with this adapter. For example, you might include
'application/vnd.siren+json' in the formats for a SIREN adapter.
This means that any request with that content type will be responded
to in the appropriate manner. Any of the strings in the list will be
considered the appropriate format for the adapter on which they are
specified.
"""
formats = None
def __init__(self, resource, base_url=''):
"""
Simply sets the resource and base URL on the instance.
:param resource: The resource that is being formatted.
:type resource: rest.viewsets.resource_base.ResourceBase
:param unicode base_url: The base url joined with resource urls.
"""
self.base_url = base_url
self.resource = resource
@abstractproperty
def formatted_body(self):
"""
This property is the fully qualified and formatted response.
For example, you might return a Hypermedia formatted response
body such as the SIREN hypermedia protocol or HAL. This
must be overridden by any subclass. Additionally, it is
a property and must be decorated as such.
:return: The formatted response body.
:rtype: unicode
"""
raise NotImplementedError
@abstractproperty
def extra_headers(self):
"""
Headers that should be added to the response. For example, it might be
the content-type etc... This must be overridden by any
subclass since it raises a NotImplementedError. It can
also be overridden as a class attribute if it will not
be dynamic.
:return: A dictionary of the headers to return.
:rtype: dict
"""
raise NotImplementedError
def combine_base_url_with_resource_url(self, resource_url):
"""
Joins ``self.base_url`` and the ``resource_url`` argument
using ``join_url_parts``.
:param unicode resource_url: The part to join with the ``self.base_url``
:return: The joined url
:rtype: unicode
"""
return join_url_parts(self.base_url, resource_url)
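# Illustrative sketch (hypothetical values, not from the source): an adapter
# constructed with base_url='https://api.example.com' should turn
# combine_base_url_with_resource_url('/widgets/1') into
# 'https://api.example.com/widgets/1', assuming join_url_parts normalizes
# the joining slashes.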
@classmethod
def format_exception(cls, exc):
"""
Takes an exception and appropriately formats
the response. By default it just returns a json dump
of the status code and the exception message.
Any exception that does not have a status_code attribute
will have a status_code of 500.
:param Exception exc: The exception to format.
:return: A tuple containing: response body, format,
http response code
:rtype: tuple
"""
status_code = getattr(exc, 'status_code', 500)
body = json.dumps(dict(status=status_code, message=six.text_type(exc)))
return body, cls.formats[0], status_code
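# Hedged usage sketch: for an exception with status_code == 404 and message
# 'Not found', a subclass whose formats list starts with 'application/json'
# would get back a tuple like
# ('{"status": 404, "message": "Not found"}', 'application/json', 404),
# with key order depending on the Python version's dict ordering.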
@property
def status_code(self):
"""
:return: Returns the status code of the resource if
it is available. If it is not it assumes a 200.
:rtype: int
"""
return self.resource.status_code or 200
|
Murillo/Hackerrank-Algorithms
|
refs/heads/master
|
Algorithms/Implementation/lisa-workbook.py
|
1
|
# Lisa's Workbook
# Developer: Murillo Grubler
# Link: https://www.hackerrank.com/challenges/lisa-workbook/problem
# Time complexity: O(total number of problems); the nested loops together visit each problem once
import math
def workbook(n, p, chapters):
special = 0
pages = 0
for i in range(n):
pages_chapt = math.ceil(chapters[i] / p)
total = chapters[i]
read = 0
for j in range(pages_chapt):
if total > p:
total = total - p
pg_remaining = read + p
else:
pg_remaining = read + total
for k in range(read, pg_remaining):
if pages == k:
special += 1
read += 1
pages += 1
return special
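# Worked example (the sample from the problem statement): workbook(5, 3,
# [4, 5, 6, 7, 8]) == 4. The special problems are chapter 1 problem 1 on
# page 1, chapter 2 problems 3 and 4 on pages 3 and 4, and chapter 3
# problem 6 on page 6.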
n,k = list(map(int, input().strip().split(' ')))
chapters = list(map(int, input().strip().split(' ')))
print (workbook(n, k, chapters))
|
space-kerala/lekha_OCR_1.0
|
refs/heads/master
|
initial_temp.py
|
1
|
# -*- coding: utf-8 -*-
# from __future__ import division
import cv2
import numpy as np
import glob
import shutil
from random import shuffle
import os
import json
import re
import preprocess as pp
import training as train
#import myapp as app
def temp_func():
url='../samples/train_images/'
s_list=sorted(os.listdir(url))
i=101
for j in s_list:
os.rename(url+j,url+str(i))
i+=1
print (j)
def temp_inv_func():
url='../samples/train_temp/'
print url
# url2='../samples/train_images/'
s_list=os.listdir(url)
for j in s_list:
file=open(url+j+'/utf8',"r")
# file2=open(url2+j+'/utf8',"r")
i_uni=file.read()
# i_uni2=file2.read()
i_uni=i_uni[:-1]
# i_uni2=i_uni2[:-1]
# print i_uni,i_uni2
os.rename(url+j,url+i_uni)
def train_svm():
svm_params = dict( kernel_type = cv2.SVM_RBF,
svm_type = cv2.SVM_C_SVC,
C=9.34, gamma=15.68 )
svm=cv2.SVM()
label_list=[]
label_list.append('a')
url='train_images/'
train_set = []
s_list=sorted(os.listdir(url))
label = 0
for i in s_list:
s_list=glob.glob(url+i+'/*.png')
if(len(s_list)>25):
file=open(url+i+'/utf8',"r")
i_uni=file.read()
i_uni=i_uni[:-1]
label_list.append(i_uni)
label+=1
else:
continue
print str(label),i,label_list[label],len(s_list)
for j in s_list:
img=cv2.imread(j,0)
img=pp.preprocess(img)
f =train.find_feature(img.copy())
# print len(f)
s = [label,f]
train_set.append(s)
f=open('label','w')
for l in label_list:
f.write(l+'\n')
f.close()
shuffle(train_set)
f_list = []
label = []
for t in train_set:
label.append(t[0])
f_list.append(t[1])
# np.savetxt('feature.txt',f_list)
# np.savetxt('label.txt',label)
# samples = np.loadtxt('feature.txt',np.float32)
# responses = np.loadtxt('label.txt',np.float32)
# responses = responses.reshape((responses.size,1))
samples = np.array(f_list,np.float32)
responses = np.array(label,np.float32)
print 'training initiated'
print 'please wait.....'
svm.train(samples,responses,params=svm_params)
# svm.train_auto(samples,responses,None,None,params=svm_params)
svm.save("svm_class.xml")
def gen_train_sample(im):
# train.classifierclassifier.load('svm_class.xml')
img = pp.preprocess(im.copy())
# img,rot = pp.skew_correction(img)
hight,width=im.shape
# M = cv2.getRotationMatrix2D((hight/2,width/2),rot-90,1)
# im = cv2.warpAffine(im,M,(width,hight))
# cv2.imwrite('skew correct.png',im)
contours2, hierarchy = cv2.findContours(img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
contours = []
for cnt in contours2:
print (cv2.contourArea(cnt))
if(cv2.contourArea(cnt)>20):
contours.append(cnt)
X = [cv2.contourArea(C) for C in contours]
# print len(contours),len(X)
t=[i for i in range (0,len(contours))]
X,t = zip(*sorted(zip(X,t)))
i=0
for j in t:
x,y,w,h=cv2.boundingRect(contours[j])
box = im[y-1:y+h+1,x-1:x+w+1]
char = pp.preprocess(box.copy())
try:
f = train.find_feature(char)
fu= train.np.array(f,train.np.float32)
# print len(fu)
t = train.classifier.predict(fu)
print t
except IndexError:
t = 0
cv2.imwrite('samp/zsamp47_8_'+str(int(t))+'_'+str(i)+'.png',box)
# cv2.imwrite('./samp/'+str(i)+'.png',box)
i+=1
# train.test()
# train_svm()
# url='/home/jithin/lekha_OCR_1.0/samples/t8.png'
# img=cv2.imread(url,0)
# # im,rot = pp.skew_correction(img)
# # # cv2.imshow("img",img)
# # # cv2.waitKey(0)
# # # cv2.destroyAllWindows()
# gen_train_sample(img)
# url='../samples/Scans/test2.png'
# temp_func()
tno=1
def make_compare_file():
f=open('./corrected_docs/Samp_'+str(tno)+'/compare_list_new.txt','w')
g=open('./corrected_docs/Samp_'+str(tno)+'/output_file_new.txt','w')
# img=cv2.imread('./Example/dc_books_page.png',0)
path='./corrected_docs/Samp_'+str(tno)+'/*.png'
url=glob.glob(path)
img=cv2.imread(url[0],0)
# img=cv2.imread('./Samp_3/samp3.png',0)
if img is None:  # cv2.imread returns None when the file cannot be read
print 'image doesn\'t exist'
exit()
img = pp.preprocess(img)
# im=img
# im,rot = pp.skew_correction(img)
line = pp.find_lines(img.copy())
# print len(line)
label_list=train.label_unicode()
i=0
num=[]
for l in line:
for w in l.word_list:
for c in w.char_list:
# num.append((str(i),label_list[int(c.label)]))
tup=label_list[int(c.label)]
f.write(tup+'\n')
g.write(tup)
# cv2.imwrite('./Samp_22/samp/'+str(i)+'.png',c.data)
i+=1
g.write(' ')
g.write('\n')
f.close()
g.close()
# for tup in num:
# # f.write(tup[0]+' '+tup[1]+'\n')
# f.write(tup[1]+'\n')
# f.close()
def calculate_accuracy():
f=open('./corrected_docs/Samp_'+str(tno)+'/compare_list.txt','r')
g=open('./corrected_docs/Samp_'+str(tno)+'/compare_list_new.txt','r')
h=open('./result_compare.txt','w')
l1=f.readlines()
l2=g.readlines()
# list1=[231,376,435,623,876,892,952,961,1002,1034,1036,1100,1155]
j=0
k=0
for j in range(len(l1)):
# for line2 in l2:
a1=str(l1[j][:-1])
a2=str(l2[j][:-1])
# print a1+':'+a2
if a1==a2:
continue
else:
# print str(j)+':'+a1+'and'+a2
# if j in list1:
# print str(j)+':'+a1+'and'+a2
h.write(str(j)+' '+a2+' '+a1+'\n')
k+=1
print 'ERRORS:',k
print 'TOTAL:',j+1
print 'ACCURACY:',100-((float(k)/(j+1))*100)  # float() avoids Python 2 integer division truncating the ratio
# for tno in range(1,17):
# print 'DOC_'+str(tno)
# make_compare_file()
# calculate_accuracy()
def find_vlines(img):
edges = cv2.Canny(img,50,150,apertureSize = 3)
h,w=img.shape
# print h,w
minLineLength=int(h*0.6)
maxGap=int(w*0.3)
maxLineGap =w*0.5
lines = cv2.HoughLinesP(edges,1,np.pi,minLineLength,minLineLength,maxLineGap)
try:
x=sorted([x1 for x1,y1,x2,y2 in lines[0]])
i=0
c=[]
while(i<len(x)):
j=i+1
while(j<len(x)):
if (abs(x[i]-x[j])<maxGap or abs(x[i]-x[j])<15):
c.append(x[j])
del x[j]
i+=1
break
else:
j+=1
i+=1
# print c
q=0
for x1,y1,x2,y2 in lines[0]:
if x1 in c:
continue
else:
cv2.line(img,(x1,y1),(x2,y2),(0,255,0),2)
q+=1
return q
except:
# print 0
return 0
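# Hedged note on the HoughLinesP call above: the documented OpenCV signature
# is HoughLinesP(image, rho, theta, threshold[, lines[, minLineLength[,
# maxLineGap]]]), so the fourth positional argument here (minLineLength)
# lands in the threshold slot, and the later positionals may not map to
# minLineLength/maxLineGap as the variable names suggest; worth verifying
# against the installed OpenCV version.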
def mytrain():
svm_params = dict( kernel_type = cv2.SVM_RBF,
svm_type = cv2.SVM_C_SVC,
C=9.34, gamma=15.68 )
svm=cv2.SVM()
label_list=[]
label_list.append('a')
url='train_images/'
train_set = []
s_list=sorted(os.listdir(url))
fr=open('newlabel.txt','w')
label = 0
for i in s_list:
s_list=glob.glob(url+i+'/*.png')
if(len(s_list)>25):
file=open(url+i+'/utf8',"r")
i_uni=file.read()
i_uni=i_uni[:-1]
label_list.append(i_uni)
label+=1
else:
# for i in s_list1:
fr.write(i+'\n')
continue
print str(label),i,label_list[label],len(s_list)
for j in s_list:
img=cv2.imread(j,0)
# w=find_vlines(img.copy())
img=pp.preprocess(img)
f =train.find_feature(img.copy())
# f+=[w]
# print len(f)
s = [label,f]
train_set.append(s)
f=open('label','w')
for l in label_list:
f.write(l+'\n')
f.close()
shuffle(train_set)
f_list = []
label = []
for t in train_set:
label.append(t[0])
f_list.append(t[1])
# np.savetxt('feature.txt',f_list)
# np.savetxt('label.txt',label)
# samples = np.loadtxt('feature.txt',np.float32)
# responses = np.loadtxt('label.txt',np.float32)
# responses = responses.reshape((responses.size,1))
samples = np.array(f_list,np.float32)
responses = np.array(label,np.float32)
print 'training initiated'
print 'please wait.....'
svm.train(samples,responses,params=svm_params)
# svm.train_auto(samples,responses,None,None,params=svm_params)
svm.save("svm_class.xml")
# -------purify training set ----------
def get_labellist():
# load classifier
label_list=[]
label_list.append('a')
url='train_images/'
train_set = []
s_list=sorted(os.listdir(url))
label = 0
for i in s_list:
s_list=glob.glob(url+i+'/*.png')
if(len(s_list)>25):
file=open(url+i+'/utf8',"r")
i_uni=file.read()
i_uni=i_uni[:-1]
label_list.append(i_uni)
label+=1
else:
# for i in s_list1:
continue
# print str(label),i,label_list[label],len(s_list)
return label_list
def purify_train():
classifier = cv2.SVM()
classifier.load('svm_class.xml')
g=[]
label_list=get_labellist()
# label_list.append('a')
url='train_images/'
v=open('purify.txt','w')
train_set = []
s_list=sorted(os.listdir(url))
label = 0
for i in s_list:
s_list=glob.glob(url+i+'/*.png')
if(len(s_list)>25):
# file=open(url+i+'/utf8',"r")
# i_uni=file.read()
# i_uni=i_uni[:-1]
# label_list.append(i_uni)
label+=1
else:
# for i in s_list1:
continue
print str(label),i,label_list[label],len(s_list)
for j in s_list:
img=cv2.imread(j,0)
# w=find_vlines(img.copy())
img=pp.preprocess(img)
f =train.find_feature(img.copy())
feature = np.array(f,np.float32)
a = classifier.predict(feature)
if a !=label:
q=j.split('/')
print a,label,int(a)
# print label_list[int(a)],i
v.write(q[2]+'\t'+label_list[int(a)]+' '+str(a)+'\t'+str(label)+' '+i+'\n')
# cv2.imwrite('train_im/'+i+'/'+q[2],im)
# os.rename(j,'train_im/'+i+'/'+q[2])
feature=list(feature)
g.append((feature,label))
# print g
with open('data.txt','w') as outfile:
json.dump(g,outfile)
# return f,g
# -----------end of purification function---
def make_modified_file():
f=open('./compare_list.txt','r')
g=open('./output_file.txt','w')
img=cv2.imread('./Example/dc_books_page.png',0)
if img is None:
print 'image doesn\'t exist'
exit()
img = pp.preprocess(img)
im,rot = pp.skew_correction(img)
line = pp.find_lines(im.copy())
# print len(line)
label_list=train.label_unicode()
q=f.readlines()
i=0
num=[]
for l in line:
for w in l.word_list:
for c in w.char_list:
# num.append((str(i),label_list[int(c.label)]))
tup=label_list[int(c.label)]
if(q[i][:-1]!=tup):
print tup
# f.write(tup+'\n')
g.write(tup)
# cv2.imwrite('samp/'+str(i)+'.png',c.data)
i+=1
g.write(' ')
g.write('\n')
f.close()
g.close()
# def callapp():
# aap = app.simpleapp_tk(None)
# aap.title('My App')
# aap.mainloop()
# aap.OnButtonClick()
def recognize_block(im):
line = pp.find_lines(im)
# print len(line)
label_list=train.label_unicode()
i=0
string=''
for l in line:
# cv2.imwrite('zline_'+str(i)+'.png',l.data)
# string=string+'\n'
j=0
for w in l.word_list:
# cv2.imwrite('zword_'+str(i)+'_word_'+str(j)+'.png',w.data)
string=string+' '
j+=1
k=0
c=0
while(c<len(w.char_list)):
char= w.char_list[c]
try:
if(label_list[int(char.label)]in ['\'',',']):
char2=w.char_list[c+1]
if(label_list[int(char2.label)]in ['\'',',']):
string=string+'\"'
c+=1
else:
string=string+label_list[int(char.label)]
elif(label_list[int(char.label)]in ['െ','േ','്ര']):
char2=w.char_list[c+1]
if(label_list[int(char2.label)]in ['െ','്ര']):
char3=w.char_list[c+2]
string=string+label_list[int(char3.label)]
c+=1
string=string+label_list[int(char2.label)]
string=string+label_list[int(char.label)]
c+=1
else:
string=string+label_list[int(char.label)]
except IndexError:
string=string+label_list[int(char.label)]
cv2.imwrite('output/zcline_'+str(i)+'_word_'+str(j)+'_c_'+str(k)+str(int(w.char_list[c].label))+'.png',w.char_list[c].data)
k+=1
c+=1
i+=1
return string
def process_lines(url):
img=cv2.imread(url,0)
# cv2.imshow("img",img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
filename=url[9:16]
print filename
img = pp.preprocess(img)
# im,rot = pp.skew_correction(img)
# cv2.imshow("img",im)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
string=recognize_block(img)
with open('book/0001/'+filename+'.txt','w') as f:
f.write(string+'\n')
print string
def read_lines():
path='book/0001/*.png'
files=glob.glob(path)
for i in files:
# print i
process_lines(i)
# return files
# print '=======phase1========'
# def visualize():
# for j in range(len(g)):
# if g[j]==83:
# print g[j]
# g[j]=0
# else:
# g[j]=1
# print g
# visualize()
# read_lines()
# img=cv2.imread('Example/pt2.png',0)
# cv2.imshow("img",img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# img = pp.preprocess(img)
# im,rot = pp.skew_correction(img)
# # cv2.imshow("img",im)
# # cv2.waitKey(0)
# # cv2.destroyAllWindows()
# print recognize_block(img)
# print recognize_block(im.copy())
# mytrain()
# train_svm()
# make_compare_file()
# calculate_accuracy()
# purify_train()
# callapp()
# make_modified_file()
|
ndawe/rootpy
|
refs/heads/master
|
rootpy/plotting/tests/test_graph.py
|
2
|
from rootpy.plotting import Graph, Graph2D, Hist
import tempfile
from random import random
from nose.tools import assert_equal
def test_init():
g = Graph(10, name='test')
assert_equal(len(g), 10)
g2d = Graph2D(10, name='test2d')
def test_init_from_hist():
h = Hist(100, -10, 10)
h.FillRandom('gaus')
g = Graph(h)
def test_init_from_file_1d():
with tempfile.NamedTemporaryFile() as f:
for i in range(100):
f.write('{0:.3f},{1:.3f}\n'.format(
random(), random()).encode('utf-8'))
f.flush()
g = Graph.from_file(f.name, sep=',')
assert_equal(len(g), 100)
def test_init_from_file_2d():
with tempfile.NamedTemporaryFile() as f:
for i in range(100):
f.write('{0:.3f},{1:.3f},{2:.3f}\n'.format(
random(), random(), random()).encode('utf-8'))
f.flush()
g = Graph2D.from_file(f.name, sep=',')
assert_equal(len(g), 100)
def test_xerr():
g = Graph(10)
list(g.xerr())
g = Graph(10, type='errors')
list(g.xerr())
g = Graph(10, type='asymm')
list(g.xerr())
def test_static_divide():
Graph.divide(Graph(Hist(10, 0, 1).FillRandom('gaus')),
Hist(10, 0, 1).FillRandom('gaus'), 'pois')
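# Hedged note: Graph.divide with 'pois' presumably forwards to ROOT's
# TGraphAsymmErrors::Divide with the "pois" option, treating the two
# histograms as independent Poisson counts when computing the ratio errors;
# the exact option handling lives in rootpy/ROOT and is assumed here.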
def test_operators():
h = Hist(10, -2, 2).FillRandom('gaus')
g = Graph(h)
g *= 2
g /= 2
g += 2
g -= 2
for point in g:
assert_equal(point.y.value, h[point.idx_ + 1].value)
if __name__ == "__main__":
import nose
nose.runmodule()
|
Tyulis/ALYTtool
|
refs/heads/master
|
txtree.py
|
1
|
# -*- coding:utf-8 -*-
from collections import OrderedDict
class ClsFunc(object):
'''A class which emulates a function. Useful to split big functions into small modules which share data'''
def __new__(cls,*args,**kwargs):
self=object.__new__(cls)
return self.main(*args,**kwargs)
class dump (ClsFunc):
def main(self,tree,customs=[]):
self.customs=customs
return self.dumpNode(tree)
def dumpNode(self,node):
final=''
for key in node.keys():
if key.__class__==str:
if key.startswith('__'):
continue
if node[key].__class__ in [dict,OrderedDict]+self.customs:
blk=self.dumpNode(node[key])
blk=self.indent(blk)
final+='%s: \n'%repr(key)
final+=blk
elif node[key].__class__ in (list,tuple):
dic=dict(enumerate(node[key]))
blk=self.dumpNode(dic)
blk=self.indent(blk)
final+='%s: %s\n'%(repr(key),str(node[key].__class__.__qualname__))
final+=blk
else:
final+='%s: %s\n'%(repr(key),repr(node[key]))
return final
def indent(self,s):
ret=''
for line in s.splitlines():
ret+='\t|%s\n'%line  # must match the '\t|' prefix expected by load.loadNode and the 2-char strip in unindent
return ret
class load (ClsFunc):
def main(self,data):
return self.loadNode(data)
def loadNode(self,node):
dic=OrderedDict()
node=node.splitlines()
i=0
while True:
try:
line=node[i].split(': ')
except IndexError:
break
if line[1].strip() in ('','list','tuple'):
subnode=''
for subline in node[i+1:]:
if subline.startswith('\t|'):
subnode+=subline+'\n'
i+=1
else:
break
res=self.loadNode(self.unindent(subnode))
if line[1]=='list':
res=list(res.values())
elif line[1]=='tuple':
res=tuple(res.values())
dic[eval(line[0])]=res
else:
if line[1] in ('true','false','none'):
line[1]=line[1].capitalize()
dic[eval(line[0])]=eval(line[1])
i+=1
return dic
def unindent(self,s):
s=s.splitlines()
ret=''
for line in s:
ret+='%s\n'%line[2:]
return ret
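# Round-trip sketch (hypothetical input): dump({'a': 1, 'b': [2, 3]}) emits
# lines such as "'a': 1" and "'b': list" followed by indented child lines
# ("\t|0: 2", "\t|1: 3"); load() rebuilds the OrderedDict by eval()-ing keys
# and values, so this format should only be used with trusted input.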
|
repotvsupertuga/tvsupertuga.repository
|
refs/heads/master
|
script.module.liveresolver/lib/liveresolver/modules/f4mproxy/utils/python_rsakey.py
|
136
|
# Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""Pure-Python RSA implementation."""
from .cryptomath import *
from .asn1parser import ASN1Parser
from .rsakey import *
from .pem import *
class Python_RSAKey(RSAKey):
def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0):
if (n and not e) or (e and not n):
raise AssertionError()
self.n = n
self.e = e
self.d = d
self.p = p
self.q = q
self.dP = dP
self.dQ = dQ
self.qInv = qInv
self.blinder = 0
self.unblinder = 0
def hasPrivateKey(self):
return self.d != 0
def _rawPrivateKeyOp(self, m):
#Create blinding values, on the first pass:
if not self.blinder:
self.unblinder = getRandomNumber(2, self.n)
self.blinder = powMod(invMod(self.unblinder, self.n), self.e,
self.n)
#Blind the input
m = (m * self.blinder) % self.n
#Perform the RSA operation
c = self._rawPrivateKeyOpHelper(m)
#Unblind the output
c = (c * self.unblinder) % self.n
#Update blinding values
self.blinder = (self.blinder * self.blinder) % self.n
self.unblinder = (self.unblinder * self.unblinder) % self.n
#Return the output
return c
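# Blinding math, for reference: with random r (the unblinder) and
# blinder = (r^-1)^e mod n, the blinded input is m * r^-e (mod n) and
# (m * r^-e)^d = m^d * r^-1 (mod n) since r^(e*d) = r (mod n); multiplying
# by r recovers m^d mod n while randomizing the exponentiation input,
# mitigating timing side channels. Squaring both factors each call keeps
# fresh blinding values cheaply.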
def _rawPrivateKeyOpHelper(self, m):
#Non-CRT version
#c = powMod(m, self.d, self.n)
#CRT version (~3x faster)
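# Garner's CRT recombination: with dP = d mod (p-1), dQ = d mod (q-1) and
# qInv = q^-1 mod p, the two half-size exponentiations below are combined
# as c = s2 + q * ((s1 - s2) * qInv mod p), which equals m^d mod n.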
s1 = powMod(m, self.dP, self.p)
s2 = powMod(m, self.dQ, self.q)
h = ((s1 - s2) * self.qInv) % self.p
c = s2 + self.q * h
return c
def _rawPublicKeyOp(self, c):
m = powMod(c, self.e, self.n)
return m
def acceptsPassword(self): return False
def generate(bits):
key = Python_RSAKey()
p = getRandomPrime(bits//2, False)
q = getRandomPrime(bits//2, False)
t = lcm(p-1, q-1)
key.n = p * q
key.e = 65537
key.d = invMod(key.e, t)
key.p = p
key.q = q
key.dP = key.d % (p-1)
key.dQ = key.d % (q-1)
key.qInv = invMod(q, p)
return key
generate = staticmethod(generate)
def parsePEM(s, passwordCallback=None):
"""Parse a string containing a PEM-encoded <privateKey>."""
if pemSniff(s, "PRIVATE KEY"):
bytes = dePem(s, "PRIVATE KEY")
return Python_RSAKey._parsePKCS8(bytes)
elif pemSniff(s, "RSA PRIVATE KEY"):
bytes = dePem(s, "RSA PRIVATE KEY")
return Python_RSAKey._parseSSLeay(bytes)
else:
raise SyntaxError("Not a PEM private key file")
parsePEM = staticmethod(parsePEM)
def _parsePKCS8(bytes):
p = ASN1Parser(bytes)
version = p.getChild(0).value[0]
if version != 0:
raise SyntaxError("Unrecognized PKCS8 version")
rsaOID = p.getChild(1).value
if list(rsaOID) != [6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0]:
raise SyntaxError("Unrecognized AlgorithmIdentifier")
#Get the privateKey
privateKeyP = p.getChild(2)
#Adjust for OCTET STRING encapsulation
privateKeyP = ASN1Parser(privateKeyP.value)
return Python_RSAKey._parseASN1PrivateKey(privateKeyP)
_parsePKCS8 = staticmethod(_parsePKCS8)
def _parseSSLeay(bytes):
privateKeyP = ASN1Parser(bytes)
return Python_RSAKey._parseASN1PrivateKey(privateKeyP)
_parseSSLeay = staticmethod(_parseSSLeay)
def _parseASN1PrivateKey(privateKeyP):
version = privateKeyP.getChild(0).value[0]
if version != 0:
raise SyntaxError("Unrecognized RSAPrivateKey version")
n = bytesToNumber(privateKeyP.getChild(1).value)
e = bytesToNumber(privateKeyP.getChild(2).value)
d = bytesToNumber(privateKeyP.getChild(3).value)
p = bytesToNumber(privateKeyP.getChild(4).value)
q = bytesToNumber(privateKeyP.getChild(5).value)
dP = bytesToNumber(privateKeyP.getChild(6).value)
dQ = bytesToNumber(privateKeyP.getChild(7).value)
qInv = bytesToNumber(privateKeyP.getChild(8).value)
return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv)
_parseASN1PrivateKey = staticmethod(_parseASN1PrivateKey)
|
followyourheart/airflow
|
refs/heads/master
|
airflow/bin/cli.py
|
4
|
#!/usr/bin/env python
from __future__ import print_function
from builtins import input
import argparse
import dateutil.parser
from datetime import datetime
import logging
import os
import subprocess
import sys
import airflow
from airflow import jobs, settings, utils
from airflow.configuration import conf
from airflow.executors import DEFAULT_EXECUTOR
from airflow.models import DagBag, TaskInstance, DagPickle
from airflow.utils import AirflowException
DAGS_FOLDER = os.path.expanduser(conf.get('core', 'DAGS_FOLDER'))
# Common help text across subcommands
mark_success_help = "Mark jobs as succeeded without running them"
subdir_help = "File location or directory from which to look for the dag"
def log_to_stdout():
log = logging.getLogger()
log.setLevel(settings.LOGGING_LEVEL)
logformat = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(logformat)
log.addHandler(ch)
def backfill(args):
logging.basicConfig(
level=settings.LOGGING_LEVEL,
format=settings.SIMPLE_LOG_FORMAT)
dagbag = DagBag(args.subdir)
if args.dag_id not in dagbag.dags:
raise AirflowException('dag_id could not be found')
dag = dagbag.dags[args.dag_id]
if args.start_date:
args.start_date = dateutil.parser.parse(args.start_date)
if args.end_date:
args.end_date = dateutil.parser.parse(args.end_date)
# If only one date is passed, use it as both start and end
args.end_date = args.end_date or args.start_date
args.start_date = args.start_date or args.end_date
if args.task_regex:
dag = dag.sub_dag(
task_regex=args.task_regex,
include_upstream=not args.ignore_dependencies)
dag.run(
start_date=args.start_date,
end_date=args.end_date,
mark_success=args.mark_success,
include_adhoc=args.include_adhoc,
local=args.local,
donot_pickle=args.donot_pickle,
ignore_dependencies=args.ignore_dependencies)
def run(args):
utils.pessimistic_connection_handling()
# Setting up logging
log = os.path.expanduser(conf.get('core', 'BASE_LOG_FOLDER'))
directory = log + "/{args.dag_id}/{args.task_id}".format(args=args)
if not os.path.exists(directory):
os.makedirs(directory)
args.execution_date = dateutil.parser.parse(args.execution_date)
iso = args.execution_date.isoformat()
filename = "{directory}/{iso}".format(**locals())
subdir = None
if args.subdir:
subdir = args.subdir.replace(
"DAGS_FOLDER", conf.get("core", "DAGS_FOLDER"))
subdir = os.path.expanduser(subdir)
logging.basicConfig(
filename=filename,
level=settings.LOGGING_LEVEL,
format=settings.LOG_FORMAT)
if not args.pickle:
dagbag = DagBag(subdir)
if args.dag_id not in dagbag.dags:
msg = 'DAG [{0}] could not be found'.format(args.dag_id)
logging.error(msg)
raise AirflowException(msg)
dag = dagbag.dags[args.dag_id]
task = dag.get_task(task_id=args.task_id)
else:
session = settings.Session()
logging.info('Loading pickle id {args.pickle}'.format(**locals()))
dag_pickle = session.query(
DagPickle).filter(DagPickle.id == args.pickle).first()
if not dag_pickle:
raise AirflowException("Who hid the pickle!? [missing pickle]")
dag = dag_pickle.pickle
task = dag.get_task(task_id=args.task_id)
task_start_date = None
if args.task_start_date:
task_start_date = dateutil.parser.parse(args.task_start_date)
task.start_date = task_start_date
ti = TaskInstance(task, args.execution_date)
if args.local:
print("Logging into: " + filename)
run_job = jobs.LocalTaskJob(
task_instance=ti,
mark_success=args.mark_success,
force=args.force,
pickle_id=args.pickle,
task_start_date=task_start_date,
ignore_dependencies=args.ignore_dependencies)
run_job.run()
elif args.raw:
ti.run(
mark_success=args.mark_success,
force=args.force,
ignore_dependencies=args.ignore_dependencies,
job_id=args.job_id,
)
else:
pickle_id = None
if args.ship_dag:
try:
# Running remotely, so pickling the DAG
session = settings.Session()
pickle = DagPickle(dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
print((
'Pickled dag {dag} '
'as pickle_id:{pickle_id}').format(**locals()))
except Exception as e:
print('Could not pickle the DAG')
print(e)
raise e
executor = DEFAULT_EXECUTOR
executor.start()
print("Sending to executor.")
executor.queue_task_instance(
ti,
mark_success=args.mark_success,
pickle_id=pickle_id,
ignore_dependencies=args.ignore_dependencies,
force=args.force)
executor.heartbeat()
executor.end()
def task_state(args):
"""
Returns the state of a TaskInstance at the command line.
>>> airflow task_state tutorial sleep 2015-01-01
success
"""
args.execution_date = dateutil.parser.parse(args.execution_date)
dagbag = DagBag(args.subdir)
if args.dag_id not in dagbag.dags:
raise AirflowException('dag_id could not be found')
dag = dagbag.dags[args.dag_id]
task = dag.get_task(task_id=args.task_id)
ti = TaskInstance(task, args.execution_date)
print(ti.current_state())
def list_dags(args):
dagbag = DagBag(args.subdir)
print("\n".join(sorted(dagbag.dags)))
def list_tasks(args):
dagbag = DagBag(args.subdir)
if args.dag_id not in dagbag.dags:
raise AirflowException('dag_id could not be found')
dag = dagbag.dags[args.dag_id]
if args.tree:
dag.tree_view()
else:
tasks = sorted([t.task_id for t in dag.tasks])
print("\n".join(sorted(tasks)))
def test(args):
log_to_stdout()
args.execution_date = dateutil.parser.parse(args.execution_date)
dagbag = DagBag(args.subdir)
if args.dag_id not in dagbag.dags:
raise AirflowException('dag_id could not be found')
dag = dagbag.dags[args.dag_id]
task = dag.get_task(task_id=args.task_id)
ti = TaskInstance(task, args.execution_date)
ti.run(force=True, ignore_dependencies=True, test_mode=True)
def clear(args):
logging.basicConfig(
level=settings.LOGGING_LEVEL,
format=settings.SIMPLE_LOG_FORMAT)
dagbag = DagBag(args.subdir)
if args.dag_id not in dagbag.dags:
raise AirflowException('dag_id could not be found')
dag = dagbag.dags[args.dag_id]
if args.start_date:
args.start_date = dateutil.parser.parse(args.start_date)
if args.end_date:
args.end_date = dateutil.parser.parse(args.end_date)
if args.task_regex:
dag = dag.sub_dag(
task_regex=args.task_regex,
include_downstream=args.downstream,
include_upstream=args.upstream,
)
dag.clear(
start_date=args.start_date,
end_date=args.end_date,
only_failed=args.only_failed,
only_running=args.only_running,
confirm_prompt=True)
def webserver(args):
print(settings.HEADER)
log_to_stdout()
from airflow.www.app import app
if args.debug:
print(
"Starting the web server on port {0} and host {1}.".format(
args.port, args.hostname))
app.run(debug=True, port=args.port, host=args.hostname)
else:
print(
'Running Tornado server on host {host} and port {port}...'.format(
host=args.hostname, port=args.port))
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.wsgi import WSGIContainer
http_server = HTTPServer(WSGIContainer(app))
http_server.listen(args.port)
IOLoop.instance().start()
def scheduler(args):
print(settings.HEADER)
log_to_stdout()
job = jobs.SchedulerJob(
dag_id=args.dag_id,
subdir=args.subdir,
num_runs=args.num_runs)
job.run()
def serve_logs(args):
print("Starting flask")
import flask
flask_app = flask.Flask(__name__)
@flask_app.route('/log/<path:filename>')
def serve_logs(filename):
log = os.path.expanduser(conf.get('core', 'BASE_LOG_FOLDER'))
return flask.send_from_directory(
log,
filename,
mimetype="application/json",
as_attachment=False)
WORKER_LOG_SERVER_PORT = \
int(conf.get('celery', 'WORKER_LOG_SERVER_PORT'))
flask_app.run(
host='0.0.0.0', port=WORKER_LOG_SERVER_PORT)
def worker(args):
# Worker to serve static log files through this simple flask app
env = os.environ.copy()
env['AIRFLOW_HOME'] = settings.AIRFLOW_HOME
sp = subprocess.Popen(
['airflow', 'serve_logs'],
env=env,
)
# Celery worker
from airflow.executors.celery_executor import app as celery_app
from celery.bin import worker
worker = worker.worker(app=celery_app)
options = {
'optimization': 'fair',
'O': 'fair',
'queues': args.queues,
}
worker.run(**options)
sp.kill()
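# Note: the serve_logs subprocess started above lives for the duration of
# the Celery worker; worker.run() blocks until the worker exits, and only
# then is the log-serving subprocess killed.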
def initdb(args):
print("DB: " + conf.get('core', 'SQL_ALCHEMY_CONN'))
utils.initdb()
print("Done.")
def resetdb(args):
print("DB: " + conf.get('core', 'SQL_ALCHEMY_CONN'))
if input(
"This will drop existing tables if they exist. "
"Proceed? (y/n)").upper() == "Y":
logging.basicConfig(level=settings.LOGGING_LEVEL,
format=settings.SIMPLE_LOG_FORMAT)
utils.resetdb()
else:
print("Bail.")
def upgradedb(args):
print("DB: " + conf.get('core', 'SQL_ALCHEMY_CONN'))
utils.upgradedb()
def version(args):
print(settings.HEADER + " v" + airflow.__version__)
def flower(args):
broka = conf.get('celery', 'BROKER_URL')
port = '--port=' + args.port
api = ''
if args.broker_api:
api = '--broker_api=' + args.broker_api
sp = subprocess.Popen(['flower', '-b', broka, port, api])
sp.wait()
def get_parser():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help='sub-command help')
ht = "Run subsections of a DAG for a specified date range"
parser_backfill = subparsers.add_parser('backfill', help=ht)
parser_backfill.add_argument("dag_id", help="The id of the dag to run")
parser_backfill.add_argument(
"-t", "--task_regex",
help="The regex to filter specific task_ids to backfill (optional)")
parser_backfill.add_argument(
"-s", "--start_date", help="Override start_date YYYY-MM-DD")
parser_backfill.add_argument(
"-e", "--end_date", help="Override end_date YYYY-MM-DD")
parser_backfill.add_argument(
"-m", "--mark_success",
help=mark_success_help, action="store_true")
parser_backfill.add_argument(
"-l", "--local",
help="Run the task using the LocalExecutor", action="store_true")
parser_backfill.add_argument(
"-x", "--donot_pickle",
help=(
"Do not attempt to pickle the DAG object to send over "
"to the workers, just tell the workers to run their version "
"of the code."),
action="store_true")
parser_backfill.add_argument(
"-a", "--include_adhoc",
help="Include dags with the adhoc parameter.", action="store_true")
parser_backfill.add_argument(
"-i", "--ignore_dependencies",
help=(
"Skip upstream tasks, run only the tasks "
"matching the regexp. Only works in conjunction with task_regex"),
action="store_true")
parser_backfill.add_argument(
"-sd", "--subdir", help=subdir_help,
default=DAGS_FOLDER)
parser_backfill.set_defaults(func=backfill)
ht = "Clear a set of task instance, as if they never ran"
parser_clear = subparsers.add_parser('clear', help=ht)
parser_clear.add_argument("dag_id", help="The id of the dag to run")
parser_clear.add_argument(
"-t", "--task_regex",
help="The regex to filter specific task_ids to clear (optional)")
parser_clear.add_argument(
"-s", "--start_date", help="Override start_date YYYY-MM-DD")
parser_clear.add_argument(
"-e", "--end_date", help="Override end_date YYYY-MM-DD")
ht = "Include upstream tasks"
parser_clear.add_argument(
"-u", "--upstream", help=ht, action="store_true")
ht = "Only failed jobs"
parser_clear.add_argument(
"-f", "--only_failed", help=ht, action="store_true")
ht = "Only running jobs"
parser_clear.add_argument(
"-r", "--only_running", help=ht, action="store_true")
ht = "Include downstream tasks"
parser_clear.add_argument(
"-d", "--downstream", help=ht, action="store_true")
parser_clear.add_argument(
"-sd", "--subdir", help=subdir_help,
default=DAGS_FOLDER)
parser_clear.set_defaults(func=clear)
ht = "Run a single task instance"
parser_run = subparsers.add_parser('run', help=ht)
parser_run.add_argument("dag_id", help="The id of the dag to run")
parser_run.add_argument("task_id", help="The task_id to run")
parser_run.add_argument(
"execution_date", help="The execution date to run")
parser_run.add_argument(
"-sd", "--subdir", help=subdir_help,
default=DAGS_FOLDER)
parser_run.add_argument(
"-s", "--task_start_date",
help="Override the tasks's start_date (used internally)",)
parser_run.add_argument(
"-m", "--mark_success", help=mark_success_help, action="store_true")
parser_run.add_argument(
"-f", "--force",
help="Force a run regardless or previous success",
action="store_true")
parser_run.add_argument(
"-l", "--local",
help="Runs the task locally, don't use the executor",
action="store_true")
parser_run.add_argument(
"-r", "--raw",
help=argparse.SUPPRESS,
action="store_true")
parser_run.add_argument(
"-i", "--ignore_dependencies",
help="Ignore upstream and depends_on_past dependencies",
action="store_true")
parser_run.add_argument(
"--ship_dag",
help="Pickles (serializes) the DAG and ships it to the worker",
action="store_true")
parser_run.add_argument(
"-p", "--pickle",
help="Serialized pickle object of the entire dag (used internally)")
parser_run.add_argument(
"-j", "--job_id", help=argparse.SUPPRESS)
parser_run.set_defaults(func=run)
ht = (
"Test a task instance. This will run a task without checking for "
"dependencies or recording it's state in the database."
)
parser_test = subparsers.add_parser('test', help=ht)
parser_test.add_argument("dag_id", help="The id of the dag to run")
parser_test.add_argument("task_id", help="The task_id to run")
parser_test.add_argument(
"execution_date", help="The execution date to run")
parser_test.add_argument(
"-sd", "--subdir", help=subdir_help,
default=DAGS_FOLDER)
parser_test.set_defaults(func=test)
ht = "Get the status of a task instance."
parser_task_state = subparsers.add_parser('task_state', help=ht)
parser_task_state.add_argument("dag_id", help="The id of the dag to check")
parser_task_state.add_argument("task_id", help="The task_id to check")
parser_task_state.add_argument(
"execution_date", help="The execution date to check")
parser_task_state.add_argument(
"-sd", "--subdir", help=subdir_help,
default=DAGS_FOLDER)
parser_task_state.set_defaults(func=task_state)
ht = "Start a Airflow webserver instance"
parser_webserver = subparsers.add_parser('webserver', help=ht)
parser_webserver.add_argument(
"-p", "--port",
default=conf.get('webserver', 'WEB_SERVER_PORT'),
type=int,
help="Set the port on which to run the web server")
parser_webserver.add_argument(
"-hn", "--hostname",
default=conf.get('webserver', 'WEB_SERVER_HOST'),
help="Set the hostname on which to run the web server")
ht = "Use the server that ships with Flask in debug mode"
parser_webserver.add_argument(
"-d", "--debug", help=ht, action="store_true")
parser_webserver.set_defaults(func=webserver)
ht = "Start a scheduler scheduler instance"
parser_scheduler = subparsers.add_parser('scheduler', help=ht)
parser_scheduler.add_argument(
"-d", "--dag_id", help="The id of the dag to run")
parser_scheduler.add_argument(
"-sd", "--subdir", help=subdir_help,
default=DAGS_FOLDER)
parser_scheduler.add_argument(
"-n", "--num_runs",
default=None,
type=int,
help="Set the number of runs to execute before exiting")
parser_scheduler.set_defaults(func=scheduler)
ht = "Initialize the metadata database"
parser_initdb = subparsers.add_parser('initdb', help=ht)
parser_initdb.set_defaults(func=initdb)
ht = "Burn down and rebuild the metadata database"
parser_resetdb = subparsers.add_parser('resetdb', help=ht)
parser_resetdb.set_defaults(func=resetdb)
ht = "Upgrade metadata database to latest version"
parser_upgradedb = subparsers.add_parser('upgradedb', help=ht)
parser_upgradedb.set_defaults(func=upgradedb)
ht = "List the DAGs"
parser_list_dags = subparsers.add_parser('list_dags', help=ht)
parser_list_dags.add_argument(
"-sd", "--subdir", help=subdir_help,
default=DAGS_FOLDER)
parser_list_dags.set_defaults(func=list_dags)
ht = "List the tasks within a DAG"
parser_list_tasks = subparsers.add_parser('list_tasks', help=ht)
parser_list_tasks.add_argument(
"-t", "--tree", help="Tree view", action="store_true")
parser_list_tasks.add_argument(
"dag_id", help="The id of the dag")
parser_list_tasks.add_argument(
"-sd", "--subdir", help=subdir_help,
default=DAGS_FOLDER)
parser_list_tasks.set_defaults(func=list_tasks)
ht = "Start a Celery worker node"
parser_worker = subparsers.add_parser('worker', help=ht)
parser_worker.add_argument(
"-q", "--queues",
help="Comma delimited list of queues to cater serve",
default=conf.get('celery', 'DEFAULT_QUEUE'))
parser_worker.set_defaults(func=worker)
ht = "Serve logs generate by worker"
parser_logs = subparsers.add_parser('serve_logs', help=ht)
parser_logs.set_defaults(func=serve_logs)
ht = "Start a Celery Flower"
parser_flower = subparsers.add_parser('flower', help=ht)
parser_flower.add_argument(
"-p", "--port", help="The port",
default=conf.get('celery', 'FLOWER_PORT'))
parser_flower.add_argument(
"-a", "--broker_api", help="Broker api")
parser_flower.set_defaults(func=flower)
parser_version = subparsers.add_parser('version', help="Show version")
parser_version.set_defaults(func=version)
return parser
|
jmwright/cadquery-x
|
refs/heads/master
|
gui/libs/pygments/styles/rrt.py
|
135
|
# -*- coding: utf-8 -*-
"""
pygments.styles.rrt
~~~~~~~~~~~~~~~~~~~
pygments "rrt" theme, based on Zap and Emacs defaults.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Comment, Name, Keyword, String
class RrtStyle(Style):
"""
Minimalistic "rrt" theme, based on Zap and Emacs defaults.
"""
background_color = '#000000'
highlight_color = '#0000ff'
styles = {
Comment: '#00ff00',
Name.Function: '#ffff00',
Name.Variable: '#eedd82',
Name.Constant: '#7fffd4',
Keyword: '#ff0000',
Comment.Preproc: '#e5e5e5',
String: '#87ceeb',
Keyword.Type: '#ee82ee',
}
|
shangwuhencc/scikit-learn
|
refs/heads/master
|
sklearn/preprocessing/label.py
|
137
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Joel Nothman <joel.nothman@gmail.com>
# Hamzeh Alsalhi <ha258@cornell.edu>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
if diff:
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
utils.multiclass.type_of_target. Possible type are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The ``multilabel_`` attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The ``indicator_matrix_`` attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
and represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
contain 0 and 1, and represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
greatest value. Typically, this makes it possible to use the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
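# pos_switch handles pos_label == 0 for dense output: positives are first
# written as -neg_label (nonzero, since neg_label < pos_label == 0), then
# flipped back to 0 after zeros are rewritten to neg_label near the end.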
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
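# Illustrative sketch (not part of the original module): label_binarize with
# a custom neg_label, mirroring the dense code path above.
# >>> label_binarize([1, 2, 6], classes=[1, 2, 4, 6], neg_label=-1)
# array([[ 1, -1, -1, -1],
#        [-1,  1, -1, -1],
#        [-1, -1, -1,  1]])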
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
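# Illustrative sketch (not in the original file): for dense scores the
# multiclass inverse is a plain row-wise argmax mapped through `classes`, e.g.
# >>> _inverse_binarize_multiclass(np.array([[0.2, 0.7, 0.1],
# ...                                        [0.9, 0.0, 0.1]]), [1, 2, 3])
# array([2, 1])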
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
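# Illustrative round trip (a sketch, not part of the original module):
# >>> mlb = MultiLabelBinarizer()
# >>> yt = mlb.fit_transform([(1, 2), (3,)])
# >>> mlb.inverse_transform(yt)
# [(1, 2), (3,)]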
|
Neurosim-lab/netpyne
|
refs/heads/development
|
examples/asdOptim/cfg.py
|
2
|
from netpyne import specs
cfg = specs.SimConfig()
cfg.networkType = 'simple' # 'complex'
# --------------------------------------------------------
# Simple network
# --------------------------------------------------------
if cfg.networkType == 'simple':
# Simulation options
cfg.dt = 0.025
cfg.duration = 2*1e3
cfg.verbose = False
cfg.saveJson = True
cfg.filename = 'simple_net'
cfg.saveDataInclude = ['simData']
cfg.recordStep = 0.1
cfg.printPopAvgRates = [500, cfg.duration]
# cfg.recordCells = [1]
# cfg.recordTraces = {'V_soma':{'sec':'soma','loc':0.5,'var':'v'}}
# Variable parameters (used in netParams)
cfg.prob = 0.2
cfg.weight = 0.025
cfg.delay = 2
# --------------------------------------------------------
# Complex network
# --------------------------------------------------------
elif cfg.networkType == 'complex':
cfg.duration = 1*1e3 # Duration of the simulation, in ms
cfg.dt = 0.1 # Internal integration timestep to use
cfg.verbose = False # Show detailed messages
cfg.recordStep = 1 # Step size in ms to save data (e.g. V traces, LFP, etc.)
cfg.filename = 'simple_net' # Set file output name
cfg.saveDataInclude = ['simData']
cfg.saveJson = True
cfg.printPopAvgRates = [100, cfg.duration]
# Variable parameters (used in netParams)
cfg.probEall = 0.1
cfg.weightEall = 0.005
cfg.probIE = 0.4
cfg.weightIE = 0.001
cfg.probLengthConst = 150
cfg.stimWeight = 0.1
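# Illustrative sketch of how a companion netParams.py would consume these
# values (the rule label 'E->E' and the import fallback follow the usual
# netpyne pattern; they are assumptions, not taken from this file):
# from netpyne import specs
# try:
#     from __main__ import cfg
# except ImportError:
#     from cfg import cfg
# netParams = specs.NetParams()
# netParams.connParams['E->E'] = {
#     'probability': cfg.prob, 'weight': cfg.weight, 'delay': cfg.delay}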
|
linyiqun/minos
|
refs/heads/master
|
owl/quota/management/commands/quota_reportor.py
|
5
|
import collections
import datetime
import logging
import smtplib
import sys
import time
from optparse import make_option
from os import path
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
import owl_config
import monitor.dbutil
import utils.mail
import utils.quota_util
import deploy_utils
from monitor.models import Cluster, Quota, Service
logger = logging.getLogger('quota')
# cluster to generate report
QUOTA_REPORT_CLUSTER = owl_config.QUOTA_REPORT_CLUSTER
# user that receive cluster report
QUOTA_REPORT_ADMINS = owl_config.QUOTA_REPORT_ADMINS
# user that receive cluster quota alert
QUOTA_ALERT_ADMINS = owl_config.QUOTA_ALERT_ADMINS
KERBEROS_IDS_PATH = owl_config.KERBEROS_IDS_PATH
admin_email = ''
try:
admin_email = settings.ADMINS[0][1]
except:
pass
class QuotaReportor:
"""Update path quota in hdfs"""
def __init__(self, options):
self.options = options
self.mailer = utils.mail.Mailer(options)
self.user_report = {} # report group by user
self.cluster_report = {} # report group by cluster
self.today = datetime.date.today()
self.kerb_user_map = self.init_kerb_user_map()
def report(self):
logger.info('start making quota report')
self.start_time = time.time()
try:
for cluster_name in QUOTA_REPORT_CLUSTER:
self.update_cluster(cluster_name)
except Exception as e:
logger.info('gather quota info failed: %r', e)
self.mailer.send_email(subject = 'Make quota report failed',
content = repr(e),
to_email = admin_email,
)
else:
self.send_report_mail()
logger.info('spent %f seconds making quota report',
time.time() - self.start_time)
def update_cluster(self, cluster_name):
hdfs_service = Service.objects.get(name='hdfs')
cluster = Cluster.objects.get(service=hdfs_service, name = cluster_name)
quota_list = monitor.dbutil.get_quota_summary(cluster)
for quota_record in quota_list:
user_report = self.user_report.setdefault(quota_record.name, {})
user_report[cluster_name] = quota_record
cluster_report = self.cluster_report.setdefault(cluster_name, {})
cluster_report[quota_record.name] = quota_record
def send_report_mail(self):
self.send_user_report_mail()
self.send_cluster_report_mail()
self.alert_to_not_healthy_users()
def send_user_report_mail(self):
for user, cluster_quota in self.user_report.iteritems():
subject = 'Hadoop hdfs quota report for user %s' % user
content = 'Report date: %s<br>' % self.today
content += self.format_quota_report_content('cluster', cluster_quota)
email_user = self.map_kerb_user_to_email_user(user)
if email_user:
email_addr = ','.join([addr for addr in email_user.split()])
self.mailer.send_email(to_email = email_addr,
subject = subject,
content = content,
type = 'html')
else:
logger.error('User %s has no email user' % user)
def send_cluster_report_mail(self):
subject = 'Hadoop hdfs quota report for admin'
content = 'Report date: %s<br>' % self.today
for cluster, user_quota in self.cluster_report.iteritems():
content += 'Quota summary on cluster[%s]<br>' % cluster
content += self.format_quota_report_content('user', user_quota)
content += '********<br>'
self.mailer.send_email(to_email = QUOTA_REPORT_ADMINS,
subject = subject,
content = content,
type = 'html')
def alert_to_not_healthy_users(self):
subject = 'Hadoop hdfs quota alert'
for user, cluster_quota in self.user_report.iteritems():
for cluster, quota in cluster_quota.iteritems():
need_alert = False
content = 'Cluster: %s\n' % cluster
content += 'User: %s\n' % user
if not utils.quota_util.is_space_quota_healthy(
quota.space_quota, quota.used_space_quota):
content += 'Alert: space quota exceeded the threshold. \
Please cleanup trash or apply for more space quota.\n'
need_alert = True
if not utils.quota_util.is_name_quota_healthy(
quota.quota, quota.used_quota):
content += 'Alert: name quota exceeded the threshold. \
Please cleanup trash or apply for more name quota.\n'
need_alert = True
if need_alert:
email_addrs = QUOTA_ALERT_ADMINS
email_user = self.map_kerb_user_to_email_user(user)
if email_user:
email_addrs += ','.join([addr for addr in email_user.split()])
self.mailer.send_email(to_email = email_addrs,
subject = subject,
content = content)
@staticmethod
def format_quota_report_content(key_name, quota_map):
content = '<table>'
HEADER_FORMAT_STR = '<tr><th>{}</th><th>{}</th><th>{}</th><th>{}</th><th>{}</th><th>{}</th><th>{}</th></tr>'
content += HEADER_FORMAT_STR.format(key_name, 'SpaceQuota', 'UsedSpace', 'RemainingSpace', 'NameQuota', 'UsedName', 'RemainingName')
ROW_FORMAT_STR = '<tr><td>{}</td><td>{}</td><td>{}</td><td %s>{}</td><td>{}</td><td>{}</td><td %s>{}</td></tr>'
ordered_dict = collections.OrderedDict(sorted(quota_map.items()))
for key, quota in ordered_dict.iteritems():
space_quota_color = '' if utils.quota_util.is_space_quota_healthy(
quota.space_quota, quota.used_space_quota) \
else 'style="color:rgb(255,0,0)"'
name_quota_color = '' if utils.quota_util.is_name_quota_healthy(
quota.quota, quota.used_quota) \
else 'style="color:rgb(255,0,0)"'
format_str = ROW_FORMAT_STR % (space_quota_color, name_quota_color)
content += format_str.format(key,
format_bigint(quota.space_quota),
format_bigint(quota.used_space_quota),
format_bigint(quota.remaining_space_quota),
quota.quota, quota.used_quota, quota.remaining_quota)
content += '</table>'
return content
def init_kerb_user_map(self):
res = {}
config_path = deploy_utils.get_config_dir()
with open(path.join(config_path, KERBEROS_IDS_PATH)) as f:
for line in f:
if line.startswith('#'):
continue
try:
# file format: kerb_user user1[ user2 user3]
kerb_user, email_users = line.strip().split(' ', 1)
if kerb_user in res:
logger.warn('Duplicated kerb user config for user: %s' % kerb_user)
res[kerb_user] = email_users
except Exception as e:
logger.warn('Failed to parse user config [%r]: %s' % (e, line))
return res
def map_kerb_user_to_email_user(self, kerb_user):
if kerb_user in self.kerb_user_map:
return self.kerb_user_map[kerb_user]
else:
return None
class Command(BaseCommand):
args = ''
help = "Run the background updater to collector quota on hdfs clusters."
def handle(self, *args, **options):
self.args = args
self.options = options
self.mailer = utils.mail.Mailer(options)
self.stdout.write("args: %r\n" % (args, ))
self.stdout.write("options: %r\n" % options)
quota_reportor = QuotaReportor(options)
try:
quota_reportor.report()
except Exception as e:
logger.warning('Quota reportor aborted: %r', e)
self.mailer.send_email(subject = 'Make quota report failed',
content = repr(e),
to_email = admin_email,
)
def format_bigint(value):
try:
value = int(value)
except (TypeError, ValueError):
return value
if value < 1024*1024:
return value
K = 1024
formatters = (
(2, '%.2fM'),
(3, '%.2fG'),
(4, '%.2fT'),
(5, '%.2fP'),
)
for exponent, formatter in formatters:
larger_num = K ** exponent
if value < larger_num * K:
return formatter % (value/float(larger_num))
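# Illustrative behavior of format_bigint (a sketch, not in the original file):
# format_bigint(512) -> 512 (values below 1 MiB are returned unchanged)
# format_bigint(1024 * 1024) -> '1.00M'
# format_bigint(3 * 1024 ** 3) -> '3.00G'
# format_bigint('n/a') -> 'n/a' (non-numeric input passes through untouched)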
|
nrc/rustc-perf
|
refs/heads/master
|
collector/benchmarks/script-servo/components/script/dom/bindings/codegen/parser/tests/test_distinguishability.py
|
51
|
def firstArgType(method):
return method.signatures()[0][1][0].type
def WebIDLTest(parser, harness):
parser.parse("""
dictionary Dict {
};
callback interface Foo {
};
interface Bar {
// Bit of a pain to get things that have dictionary types
void passDict(optional Dict arg);
void passFoo(Foo arg);
void passNullableUnion((object? or DOMString) arg);
void passNullable(Foo? arg);
};
""")
results = parser.finish()
iface = results[2]
harness.ok(iface.isInterface(), "Should have interface")
dictMethod = iface.members[0]
ifaceMethod = iface.members[1]
nullableUnionMethod = iface.members[2]
nullableIfaceMethod = iface.members[3]
dictType = firstArgType(dictMethod)
ifaceType = firstArgType(ifaceMethod)
harness.ok(dictType.isDictionary(), "Should have dictionary type")
harness.ok(ifaceType.isInterface(), "Should have interface type")
harness.ok(ifaceType.isCallbackInterface(), "Should have callback interface type")
harness.ok(not dictType.isDistinguishableFrom(ifaceType),
"Dictionary not distinguishable from callback interface")
harness.ok(not ifaceType.isDistinguishableFrom(dictType),
"Callback interface not distinguishable from dictionary")
nullableUnionType = firstArgType(nullableUnionMethod)
nullableIfaceType = firstArgType(nullableIfaceMethod)
harness.ok(nullableUnionType.isUnion(), "Should have union type")
harness.ok(nullableIfaceType.isInterface(), "Should have interface type")
harness.ok(nullableIfaceType.nullable(), "Should have nullable type")
harness.ok(not nullableUnionType.isDistinguishableFrom(nullableIfaceType),
"Nullable type not distinguishable from union with nullable "
"member type")
harness.ok(not nullableIfaceType.isDistinguishableFrom(nullableUnionType),
"Union with nullable member type not distinguishable from "
"nullable type")
parser = parser.reset()
parser.parse("""
interface TestIface {
void passKid(Kid arg);
void passParent(Parent arg);
void passGrandparent(Grandparent arg);
void passImplemented(Implemented arg);
void passImplementedParent(ImplementedParent arg);
void passUnrelated1(Unrelated1 arg);
void passUnrelated2(Unrelated2 arg);
void passArrayBuffer(ArrayBuffer arg);
void passArrayBuffer(ArrayBufferView arg);
};
interface Kid : Parent {};
interface Parent : Grandparent {};
interface Grandparent {};
interface Implemented : ImplementedParent {};
Parent implements Implemented;
interface ImplementedParent {};
interface Unrelated1 {};
interface Unrelated2 {};
""")
results = parser.finish()
iface = results[0]
harness.ok(iface.isInterface(), "Should have interface")
argTypes = [firstArgType(method) for method in iface.members]
unrelatedTypes = [firstArgType(method) for method in iface.members[-3:]]
for type1 in argTypes:
for type2 in argTypes:
distinguishable = (type1 is not type2 and
(type1 in unrelatedTypes or
type2 in unrelatedTypes))
harness.check(type1.isDistinguishableFrom(type2),
distinguishable,
"Type %s should %sbe distinguishable from type %s" %
(type1, "" if distinguishable else "not ", type2))
harness.check(type2.isDistinguishableFrom(type1),
distinguishable,
"Type %s should %sbe distinguishable from type %s" %
(type2, "" if distinguishable else "not ", type1))
parser = parser.reset()
parser.parse("""
interface Dummy {};
interface TestIface {
void method(long arg1, TestIface arg2);
void method(long arg1, long arg2);
void method(long arg1, Dummy arg2);
void method(DOMString arg1, DOMString arg2, DOMString arg3);
};
""")
results = parser.finish()
harness.check(len(results[1].members), 1,
"Should look like we have one method")
harness.check(len(results[1].members[0].signatures()), 4,
"Should have four signatures")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface Dummy {};
interface TestIface {
void method(long arg1, TestIface arg2);
void method(long arg1, long arg2);
void method(any arg1, Dummy arg2);
void method(DOMString arg1, DOMString arg2, DOMString arg3);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should throw when args before the distinguishing arg are not "
"all the same type")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface Dummy {};
interface TestIface {
void method(long arg1, TestIface arg2);
void method(long arg1, long arg2);
void method(any arg1, DOMString arg2);
void method(DOMString arg1, DOMString arg2, DOMString arg3);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should throw when there is no distinguishing index")
# Now let's test our whole distinguishability table
argTypes = [ "long", "short", "long?", "short?", "boolean",
"boolean?", "DOMString", "ByteString", "Enum", "Enum2",
"Interface", "Interface?",
"AncestorInterface", "UnrelatedInterface",
"ImplementedInterface", "CallbackInterface",
"CallbackInterface?", "CallbackInterface2",
"object", "Callback", "Callback2", "optional Dict",
"optional Dict2", "sequence<long>", "sequence<short>",
"record<DOMString, object>",
"record<USVString, Dict>",
"record<ByteString, long>",
"Date", "Date?", "any",
"Promise<any>", "Promise<any>?",
"USVString", "ArrayBuffer", "ArrayBufferView", "SharedArrayBuffer",
"Uint8Array", "Uint16Array",
"(long or Callback)", "optional (long or Dict)",
]
# When we can parse Date, we need to add it here.
# XXXbz we can, and should really do that...
# Try to categorize things a bit to keep list lengths down
def allBut(list1, list2):
return [a for a in list1 if a not in list2 and
(a != "any" and a != "Promise<any>" and a != "Promise<any>?")]
unions = [ "(long or Callback)", "optional (long or Dict)" ]
numerics = [ "long", "short", "long?", "short?" ]
booleans = [ "boolean", "boolean?" ]
primitives = numerics + booleans
nonNumerics = allBut(argTypes, numerics + unions)
nonBooleans = allBut(argTypes, booleans)
strings = [ "DOMString", "ByteString", "Enum", "Enum2", "USVString" ]
nonStrings = allBut(argTypes, strings)
nonObjects = primitives + strings
objects = allBut(argTypes, nonObjects)
bufferSourceTypes = ["ArrayBuffer", "ArrayBufferView", "Uint8Array", "Uint16Array"]
sharedBufferSourceTypes = ["SharedArrayBuffer"]
interfaces = [ "Interface", "Interface?", "AncestorInterface",
"UnrelatedInterface", "ImplementedInterface" ] + bufferSourceTypes + sharedBufferSourceTypes
nullables = (["long?", "short?", "boolean?", "Interface?",
"CallbackInterface?", "optional Dict", "optional Dict2",
"Date?", "any", "Promise<any>?"] +
allBut(unions, [ "(long or Callback)" ]))
dates = [ "Date", "Date?" ]
sequences = [ "sequence<long>", "sequence<short>" ]
nonUserObjects = nonObjects + interfaces + dates + sequences
otherObjects = allBut(argTypes, nonUserObjects + ["object"])
notRelatedInterfaces = (nonObjects + ["UnrelatedInterface"] +
otherObjects + dates + sequences + bufferSourceTypes + sharedBufferSourceTypes)
records = [ "record<DOMString, object>", "record<USVString, Dict>",
"record<ByteString, long>" ]
# Build a representation of the distinguishability table as a dict
# of dicts, holding True values where needed, holes elsewhere.
data = dict()
for type in argTypes:
data[type] = dict()
def setDistinguishable(type, types):
for other in types:
data[type][other] = True
setDistinguishable("long", nonNumerics)
setDistinguishable("short", nonNumerics)
setDistinguishable("long?", allBut(nonNumerics, nullables))
setDistinguishable("short?", allBut(nonNumerics, nullables))
setDistinguishable("boolean", nonBooleans)
setDistinguishable("boolean?", allBut(nonBooleans, nullables))
setDistinguishable("DOMString", nonStrings)
setDistinguishable("ByteString", nonStrings)
setDistinguishable("USVString", nonStrings)
setDistinguishable("Enum", nonStrings)
setDistinguishable("Enum2", nonStrings)
setDistinguishable("Interface", notRelatedInterfaces)
setDistinguishable("Interface?", allBut(notRelatedInterfaces, nullables))
setDistinguishable("AncestorInterface", notRelatedInterfaces)
setDistinguishable("UnrelatedInterface",
allBut(argTypes, ["object", "UnrelatedInterface"]))
setDistinguishable("ImplementedInterface", notRelatedInterfaces)
setDistinguishable("CallbackInterface", nonUserObjects)
setDistinguishable("CallbackInterface?", allBut(nonUserObjects, nullables))
setDistinguishable("CallbackInterface2", nonUserObjects)
setDistinguishable("object", nonObjects)
setDistinguishable("Callback", nonUserObjects)
setDistinguishable("Callback2", nonUserObjects)
setDistinguishable("optional Dict", allBut(nonUserObjects, nullables))
setDistinguishable("optional Dict2", allBut(nonUserObjects, nullables))
setDistinguishable("sequence<long>",
allBut(argTypes, sequences + ["object"]))
setDistinguishable("sequence<short>",
allBut(argTypes, sequences + ["object"]))
setDistinguishable("record<DOMString, object>", nonUserObjects)
setDistinguishable("record<USVString, Dict>", nonUserObjects)
setDistinguishable("record<ByteString, long>", nonUserObjects)
setDistinguishable("Date", allBut(argTypes, dates + ["object"]))
setDistinguishable("Date?", allBut(argTypes, dates + nullables + ["object"]))
setDistinguishable("any", [])
setDistinguishable("Promise<any>", [])
setDistinguishable("Promise<any>?", [])
setDistinguishable("ArrayBuffer", allBut(argTypes, ["ArrayBuffer", "object"]))
setDistinguishable("ArrayBufferView", allBut(argTypes, ["ArrayBufferView", "Uint8Array", "Uint16Array", "object"]))
setDistinguishable("Uint8Array", allBut(argTypes, ["ArrayBufferView", "Uint8Array", "object"]))
setDistinguishable("Uint16Array", allBut(argTypes, ["ArrayBufferView", "Uint16Array", "object"]))
setDistinguishable("SharedArrayBuffer", allBut(argTypes, ["SharedArrayBuffer", "object"]))
setDistinguishable("(long or Callback)",
allBut(nonUserObjects, numerics))
setDistinguishable("optional (long or Dict)",
allBut(nonUserObjects, numerics + nullables))
def areDistinguishable(type1, type2):
return data[type1].get(type2, False)
def checkDistinguishability(parser, type1, type2):
idlTemplate = """
enum Enum { "a", "b" };
enum Enum2 { "c", "d" };
interface Interface : AncestorInterface {};
interface AncestorInterface {};
interface UnrelatedInterface {};
interface ImplementedInterface {};
Interface implements ImplementedInterface;
callback interface CallbackInterface {};
callback interface CallbackInterface2 {};
callback Callback = any();
callback Callback2 = long(short arg);
dictionary Dict {};
dictionary Dict2 {};
interface TestInterface {%s
};
"""
methodTemplate = """
void myMethod(%s arg);"""
methods = (methodTemplate % type1) + (methodTemplate % type2)
idl = idlTemplate % methods
parser = parser.reset()
threw = False
try:
parser.parse(idl)
results = parser.finish()
except:
threw = True
if areDistinguishable(type1, type2):
harness.ok(not threw,
"Should not throw for '%s' and '%s' because they are distinguishable" % (type1, type2))
else:
harness.ok(threw,
"Should throw for '%s' and '%s' because they are not distinguishable" % (type1, type2))
# Enumerate over everything in both orders, since order matters in
# terms of our implementation of distinguishability checks
for type1 in argTypes:
for type2 in argTypes:
checkDistinguishability(parser, type1, type2)
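# Sanity check of the table built above (a sketch, not part of the test):
# areDistinguishable("long", "DOMString") -> True  (numeric vs. string)
# areDistinguishable("long", "short")     -> False (both numeric)
# The harness then parses every ordered pair, so the check is effectively
# symmetric only if the table construction above is.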
|
Godiyos/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Doc/includes/sqlite3/execute_2.py
|
47
|
import sqlite3
con = sqlite3.connect("mydb")
cur = con.cursor()
who = "Yeltsin"
age = 72
cur.execute("select name_last, age from people where name_last=:who and age=:age",
{"who": who, "age": age})
print(cur.fetchone())
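# The same query with positional (qmark) parameters would be (a sketch):
# cur.execute("select name_last, age from people where name_last=? and age=?",
#             (who, age))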
|
deanishe/alfred-fakeum
|
refs/heads/master
|
src/libs/faker/providers/person/es_MX/__init__.py
|
2
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from .. import Provider as PersonProvider
class Provider(PersonProvider):
formats = (
'{{first_name}} {{last_name}} {{last_name}}',
'{{first_name}} {{first_name}} {{last_name}}',
'{{first_name}} {{first_name}} {{last_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{prefix}} {{first_name}} {{last_name}}',
)
first_names = ('Abel', 'Abelardo', 'Abigail', 'Abraham', 'Abril',
'Adalberto', 'Adán', 'Adela', 'Adriana',
'Aida', 'Alejandra', 'Agustín', 'Alberto', 'Aldonza', 'Alicia',
'Alta Gracia', 'Alonso', 'Aldo', 'Alejandro', 'Alfonso', 'Alfredo',
'Alma', 'Alvaro', 'Amalia', 'Amanda', 'Amador', 'Amelia', 'Ana',
'Anabel', 'Ana Luisa', 'Ana María', 'Anel', 'Andrea', 'Andrés',
'Ángel', 'Ángela', 'Angélica', 'Antonia', 'Antonio', 'Araceli',
'Arcelia', 'Ariadna', 'Armando', 'Arturo', 'Asunción', 'Augusto',
'Aurora', 'Aurelio', 'Barbara', 'Beatriz', 'Berta', 'Benito',
'Benjamín', 'Bernardo', 'Bernabé', 'Bianca', 'Blanca',
'Bruno', 'Camila', 'Camilo', 'Caridad', 'Carla', 'Carlos', 'Carlota',
'Carmen', 'Carolina', 'Catalina', 'César', 'Cecilia', 'Celia',
'Citlali', 'Clara', 'Claudia', 'Claudio', 'Clemente', 'Concepción',
'Conchita', 'Cornelio', 'Cristian', 'Cristal',
'Cristina', 'Cristobal', 'Cynthia', 'Dalia', 'Daniel', 'Daniela',
'Darío', 'David', 'Débora', 'Delia', 'Diana', 'Diego', 'Dolores',
'Dulce', 'Dulce María', 'Eduardo', 'Elena', 'Elias', 'Elisa',
'Eloisa', 'Elsa', 'Elvia', 'Elvira', 'Eloy', 'Emilia', 'Emiliano',
'Emilio', 'Enrique', 'Eric', 'Ernesto', 'Esmeralda', 'Esteban',
'Estefanía', 'Estela', 'Esparta', 'Espartaco', 'Esperanza', 'Estela',
'Esther', 'Eugenia', 'Eugenio', 'Eva', 'Evelio', 'Fabiola',
'Federico', 'Felipe', 'Fernando', 'Felix', 'Fidel', 'Flavio',
'Florencia', 'Francisco', 'Francisco Javier', 'Francisca', 'Frida',
'Gabino', 'Gabriela', 'Gabriel', 'Genaro', 'Georgina', 'Gerardo',
'Gerónimo', 'Germán', 'Gilberto', 'Guillermina', 'Gloria',
'Gonzalo', 'Graciela', 'Gregorio', 'Guillermo',
'Guadalupe', 'Gustavo', 'Héctor', 'Helena', 'Hermelinda', 'Hernán',
'Hilda', 'Homero', 'Horacio', 'Hugo', 'Humberto', 'Ignacio', 'Ilse',
'Indira', 'Inés', 'Irene', 'Irma', 'Itzel', 'Isaac', 'Isabel',
'Isabela', 'Israel', 'Iván', 'Ivonne', 'Jacinto', 'Jacobo', 'Jaime',
'Javier', 'Jaqueline', 'Jerónimo', 'Jesús', 'Joaquín', 'Jonás',
'Jorge', 'Jorge Luis', 'Jos', 'José', 'Josefina', 'José Carlos',
'José Eduardo', 'José Emilio', 'José Luis', 'José Manuél',
'José María', 'Juan', 'Juana', 'Juan Carlos', 'Judith', 'Julia',
'Julio', 'Julio César', 'Laura', 'Leonardo', 'Leonel', 'Leonor',
'Karla', 'Karina', 'Leticia', 'Lorenzo', 'Lucas', 'Lilia', 'Liliana',
'Linda', 'Lorena', 'Lourdes', 'Lucía', 'Luisa', 'Luz', 'Luis',
'Luis Miguel', 'Luis Manuel', 'Magdalena', 'Manuel', 'Marco Antonio',
'Marcela', 'Marcos', 'Margarita', 'María', 'Marisela', 'Marisol',
'María del Carmen', 'María Cristina', 'María Elena',
'María Eugenia', 'María José', 'María Luisa', 'María Teresa', 'Marisol',
'Martha', 'Mayte', 'Mariano', 'Mariana', 'Mario', 'Martín', 'Mateo',
'Mauro', 'Mauricio', 'Maximiliano', 'Mercedes', 'Micaela', 'Minerva',
'Mitzy', 'Miguel', 'Miguel Ángel', 'Miriam', 'Modesto', 'Mónica',
'Nadia', 'Natalia', 'Natividad', 'Nancy', 'Nayeli', 'Nelly',
'Noelia', 'Noemí', 'Norma', 'Nicolás', 'Octavio', 'Ofelia', 'Olivia',
'Óliver', 'Olga', 'Óscar', 'Oswaldo', 'Omar', 'Pablo', 'Paola',
'Patricia', 'Pamela', 'Patricio', 'Pascual', 'Paulina', 'Pedro',
'Perla', 'Pilar', 'Porfirio', 'Rafaél', 'Ramiro', 'Ramón', 'Raúl',
'Raquel', 'Rebeca', 'Reina', 'Renato', 'René', 'Reynaldo',
'Ricardo', 'Roberto', 'Rodolfo', 'Rocío', 'Rodrigo', 'Rolando',
'Rosa', 'Rosalia', 'Rosario', 'Rubén', 'Rufino', 'Ruby', 'Salvador',
'Salma', 'Samuel', 'Sandra', 'Santiago', 'Sara', 'Sessa', 'Sergio',
'Serafín', 'Silvano', 'Silvia', 'Sofía', 'Socorro', 'Soledad',
'Sonia', 'Susana', 'Tania', 'Teresa', 'Teodoro', 'Timoteo', 'Tomás',
'Trinidad', 'Verónica', 'Vicente', 'Violeta', 'Uriel', 'Úrsula',
'Vanesa', 'Víctor', 'Victoria', 'Virginia', 'Wilfrido', 'Wendolin',
'Yeni', 'Yolanda', 'Yuridia', 'Zacarías', 'Zeferino', 'Zoé')
last_names = ('Abrego', 'Abreu', 'Acevedo', 'Acosta', 'Acuña',
'Adame', 'Aguayo', 'Aguilar', 'Aguilera', 'Aguirre', 'Alarcón', 'Alba',
'Alcala', 'Alcántar', 'Alcaraz', 'Alejandro', 'Alemán', 'Alfaro',
'Almanza', 'Almaraz', 'Almonte', 'Alonso', 'Alonzo', 'Altamirano',
'Alva', 'Alvarado', 'Alvarez', 'Amador', 'Amaya', 'Anaya', 'Anguiano',
'Angulo', 'Aparicio', 'Apodaca', 'Aponte', 'Aragón', 'Aranda', 'Arce',
'Archuleta', 'Arellano', 'Arenas', 'Arevalo', 'Arguello', 'Arias',
'Armas', 'Armendáriz', 'Armenta', 'Arredondo', 'Arreola', 'Arriaga',
'Arroyo', 'Arteaga', 'Ávalos', 'Ávila', 'Avilés', 'Ayala', 'Baca',
'Badillo', 'Báez', 'Baeza', 'Bahena', 'Balderas', 'Ballesteros',
'Bañuelos', 'Barajas', 'Barela', 'Barragán', 'Barraza', 'Barrera',
'Barreto', 'Barrientos', 'Barrios', 'Batista', 'Becerra', 'Beltrán',
'Benavides', 'Benavídez', 'Benítez', 'Bermúdez', 'Bernal', 'Berríos',
'Bétancourt', 'Blanco', 'Bonilla', 'Borrego', 'Botello', 'Bravo',
'Briones', 'Briseño', 'Brito', 'Bueno', 'Burgos', 'Bustamante',
'Bustos', 'Caballero', 'Cabán', 'Cabrera', 'Cadena', 'Caldera',
'Calderón', 'Calvillo', 'Camacho', 'Camarillo', 'Campos', 'Canales',
'Candelaria', 'Cano', 'Cantú', 'Caraballo', 'Carbajal', 'Cardenas',
'Cardona', 'Carmona', 'Carranza', 'Carrasco', 'Carreón', 'Carrera',
'Carrero', 'Carrillo', 'Carrión', 'Carvajal', 'Casanova', 'Casares',
'Casárez', 'Casas', 'Casillas', 'Castañeda', 'Castellanos', 'Castillo',
'Castro', 'Cavazos', 'Cazares', 'Ceballos', 'Cedillo', 'Ceja',
'Centeno', 'Cepeda', 'Cervantes', 'Cervántez', 'Chacón', 'Chapa',
'Chavarría', 'Chávez', 'Cintrón', 'Cisneros', 'Collado', 'Collazo',
'Colón', 'Colunga', 'Concepción', 'Contreras', 'Cordero', 'Córdova',
'Cornejo', 'Corona', 'Coronado', 'Corral', 'Corrales', 'Correa',
'Cortés', 'Cortez', 'Cotto', 'Covarrubias', 'Crespo', 'Cruz', 'Cuellar',
'Curiel', 'Dávila', 'de Anda', 'de Jesús', 'de la Crúz', 'de la Fuente',
'de la Garza', 'de la O', 'de la Rosa', 'de la Torre', 'de León',
'Delgadillo', 'Delgado', 'del Río', 'del Valle', 'Díaz', 'Domínguez',
'Duarte', 'Dueñas', 'Durán', 'Echeverría', 'Elizondo', 'Enríquez',
'Escalante', 'Escamilla', 'Escobar', 'Escobedo', 'Esparza', 'Espinal',
'Espino', 'Espinosa', 'Espinoza', 'Esquibel', 'Esquivel', 'Estévez',
'Estrada', 'Fajardo', 'Farías', 'Feliciano', 'Fernández', 'Ferrer',
'Fierro', 'Figueroa', 'Flores', 'Flórez', 'Fonseca', 'Franco', 'Frías',
'Fuentes', 'Gaitán', 'Galarza', 'Galindo', 'Gallardo', 'Gallegos',
'Galván', 'Gálvez', 'Gamboa', 'Gamez', 'Gaona', 'Garay', 'García',
'Garibay', 'Garica', 'Garrido', 'Garza', 'Gastélum', 'Gaytán', 'Gil',
'Girón', 'Godínez', 'Godoy', 'Gómez', 'Gonzales', 'González', 'Gollum',
'Gracia', 'Granado', 'Granados', 'Griego', 'Grijalva', 'Guajardo',
'Guardado', 'Guerra', 'Guerrero', 'Guevara', 'Guillen', 'Gurule',
'Gutiérrez', 'Guzmán', 'Haro', 'Henríquez', 'Heredia', 'Hernádez',
'Hernandes', 'Hernández', 'Herrera', 'Hidalgo', 'Hinojosa', 'Holguín',
'Huerta', 'Hurtado', 'Ibarra', 'Iglesias', 'Irizarry', 'Jaime',
'Jaimes', 'Jáquez', 'Jaramillo', 'Jasso', 'Jiménez', 'Jimínez',
'Juárez', 'Jurado', 'Laboy', 'Lara', 'Laureano', 'Leal', 'Lebrón',
'Ledesma', 'Leiva', 'Lemus', 'León', 'Lerma', 'Leyva', 'Limón',
'Linares', 'Lira', 'Llamas', 'Loera', 'Lomeli', 'Longoria', 'López',
'Lovato', 'Loya', 'Lozada', 'Lozano', 'Lucero', 'Lucio', 'Luevano',
'Lugo', 'Luna', 'Macías', 'Madera', 'Madrid', 'Madrigal', 'Maestas',
'Magaña', 'Malave', 'Maldonado', 'Manzanares', 'Mares', 'Marín',
'Márquez', 'Marrero', 'Marroquín', 'Martínez', 'Mascareñas', 'Mata',
'Mateo', 'Matías', 'Matos', 'Maya', 'Mayorga', 'Medina', 'Medrano',
'Mejía', 'Meléndez', 'Melgar', 'Mena', 'Menchaca', 'Méndez', 'Mendoza',
'Menéndez', 'Meraz', 'Mercado', 'Merino', 'Mesa', 'Meza', 'Miramontes',
'Miranda', 'Mireles', 'Mojica', 'Molina', 'Mondragón', 'Monroy',
'Montalvo', 'Montañez', 'Montaño', 'Montemayor', 'Montenegro',
'Montero', 'Montes', 'Montez', 'Montoya', 'Mora', 'Morales', 'Moreno',
'Mota', 'Moya', 'Munguía', 'Muñiz', 'Muñoz', 'Murillo', 'Muro',
'Nájera', 'Naranjo', 'Narváez', 'Nava', 'Navarrete', 'Navarro',
'Nazario', 'Negrete', 'Negrón', 'Nevárez', 'Nieto', 'Nieves', 'Niño',
'Noriega', 'Núñez', 'Ocampo', 'Ocasio', 'Ochoa', 'Ojeda', 'Olivares',
'Olivárez', 'Olivas', 'Olivera', 'Olivo', 'Olmos', 'Olvera',
'Ontiveros', 'Oquendo', 'Ordóñez', 'Orellana', 'Ornelas', 'Orosco',
'Orozco', 'Orta', 'Ortega', 'Ortiz', 'Osorio', 'Otero', 'Ozuna',
'Pabón', 'Pacheco', 'Padilla', 'Padrón', 'Páez', 'Palacios', 'Palomino',
'Palomo', 'Pantoja', 'Paredes', 'Parra', 'Partida', 'Patiño', 'Paz',
'Pedraza', 'Pedroza', 'Pelayo', 'Peña', 'Perales', 'Peralta', 'Perea',
'Peres', 'Pérez', 'Pichardo', 'Piña', 'Pineda', 'Pizarro', 'Polanco',
'Ponce', 'Porras', 'Portillo', 'Posada', 'Prado', 'Preciado', 'Prieto',
'Puente', 'Puga', 'Pulido', 'Quesada', 'Quezada', 'Quiñones',
'Quiñónez', 'Quintana', 'Quintanilla', 'Quintero', 'Quiroz', 'Rael',
'Ramírez', 'Ramón', 'Ramos', 'Rangel', 'Rascón', 'Raya', 'Razo',
'Regalado', 'Rendón', 'Rentería', 'Reséndez', 'Reyes', 'Reyna',
'Reynoso', 'Rico', 'Rincón', 'Riojas', 'Ríos', 'Rivas', 'Rivera',
'Rivero', 'Robledo', 'Robles', 'Rocha', 'Rodarte', 'Rodrígez',
'Rodríguez', 'Rodríquez', 'Rojas', 'Rojo', 'Roldán', 'Rolón', 'Romero',
'Romo', 'Roque', 'Rosado', 'Rosales', 'Rosario', 'Rosas', 'Roybal',
'Rubio', 'Ruelas', 'Ruiz', 'Saavedra', 'Sáenz', 'Saiz', 'Salas',
'Salazar', 'Salcedo', 'Salcido', 'Saldaña', 'Saldivar', 'Salgado',
'Salinas', 'Samaniego', 'Sanabria', 'Sanches', 'Sánchez', 'Sandoval',
'Santacruz', 'Santana', 'Santiago', 'Santillán', 'Sarabia', 'Sauceda',
'Saucedo', 'Segovia', 'Segura', 'Sepúlveda', 'Serna', 'Serrano',
'Serrato', 'Sevilla', 'Sierra', 'Sisneros', 'Solano', 'Solís', 'Soliz',
'Solorio', 'Solorzano', 'Soria', 'Sosa', 'Sotelo', 'Soto', 'Suárez',
'Tafoya', 'Tamayo', 'Tamez', 'Tapia', 'Tejada', 'Tejeda', 'Téllez',
'Tello', 'Terán', 'Terrazas', 'Tijerina', 'Tirado', 'Toledo', 'Toro',
'Torres', 'Tórrez', 'Tovar', 'Trejo', 'Treviño', 'Trujillo', 'Ulibarri',
'Ulloa', 'Urbina', 'Ureña', 'Urías', 'Uribe', 'Urrutia', 'Vaca',
'Valadez', 'Valdés', 'Valdez', 'Valdivia', 'Valencia', 'Valentín',
'Valenzuela', 'Valladares', 'Valle', 'Vallejo', 'Valles', 'Valverde',
'Vanegas', 'Varela', 'Vargas', 'Vásquez', 'Vázquez', 'Vega', 'Vela',
'Velasco', 'Velásquez', 'Velázquez', 'Vélez', 'Véliz', 'Venegas',
'Vera', 'Verdugo', 'Verduzco', 'Vergara', 'Viera', 'Vigil', 'Villa',
'Villagómez', 'Villalobos', 'Villalpando', 'Villanueva', 'Villareal',
'Villarreal', 'Villaseñor', 'Villegas', 'Yáñez', 'Ybarra', 'Zambrano',
'Zamora', 'Zamudio', 'Zapata', 'Zaragoza', 'Zarate', 'Zavala', 'Zayas',
'Zedillo', 'Zelaya', 'Zepeda', 'Zúñiga')
prefixes = ('Sr(a).', 'Dr.', 'Mtro.', 'Lic.',
'Ing.')
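# Illustrative usage (a sketch; assumes the standard faker entry point, and
# the output shown is only one possible random draw):
# from faker import Faker
# fake = Faker('es_MX')
# fake.name()  # e.g. 'Ing. Catalina Ceballos Uribe'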
|
davidfraser/sqlalchemy
|
refs/heads/master
|
lib/sqlalchemy/dialects/mssql/zxjdbc.py
|
59
|
# mssql/zxjdbc.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+zxjdbc
:name: zxJDBC for Jython
:dbapi: zxjdbc
:connectstring: mssql+zxjdbc://user:pass@host:port/dbname\
[?key=value&key=value...]
:driverurl: http://jtds.sourceforge.net/
.. note:: Jython is not supported by current versions of SQLAlchemy. The
zxjdbc dialect should be considered as experimental.
"""
from ...connectors.zxJDBC import ZxJDBCConnector
from .base import MSDialect, MSExecutionContext
from ... import engine
class MSExecutionContext_zxjdbc(MSExecutionContext):
_embedded_scope_identity = False
def pre_exec(self):
super(MSExecutionContext_zxjdbc, self).pre_exec()
# scope_identity after the fact returns null in jTDS so we must
# embed it
if self._select_lastrowid and self.dialect.use_scope_identity:
self._embedded_scope_identity = True
self.statement += "; SELECT scope_identity()"
def post_exec(self):
if self._embedded_scope_identity:
while True:
try:
row = self.cursor.fetchall()[0]
break
except self.dialect.dbapi.Error:
self.cursor.nextset()
self._lastrowid = int(row[0])
if (self.isinsert or self.isupdate or self.isdelete) and \
self.compiled.returning:
self._result_proxy = engine.FullyBufferedResultProxy(self)
if self._enable_identity_insert:
table = self.dialect.identifier_preparer.format_table(
self.compiled.statement.table)
self.cursor.execute("SET IDENTITY_INSERT %s OFF" % table)
class MSDialect_zxjdbc(ZxJDBCConnector, MSDialect):
jdbc_db_name = 'jtds:sqlserver'
jdbc_driver_name = 'net.sourceforge.jtds.jdbc.Driver'
execution_ctx_cls = MSExecutionContext_zxjdbc
def _get_server_version_info(self, connection):
return tuple(
int(x)
for x in connection.connection.dbversion.split('.')
)
dialect = MSDialect_zxjdbc
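# Illustrative engine creation under Jython (a sketch, following the
# connectstring documented in the module docstring above):
# from sqlalchemy import create_engine
# engine = create_engine("mssql+zxjdbc://user:pass@host:1433/dbname")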
|
ghber/My-Django-Nonrel
|
refs/heads/master
|
django/contrib/contenttypes/generic.py
|
155
|
"""
Classes allowing "generic" relations through ContentType and object-id fields.
"""
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db.models import signals
from django.db import models, router, DEFAULT_DB_ALIAS
from django.db.models.fields.related import RelatedField, Field, ManyToManyRel
from django.db.models.loading import get_model
from django.forms import ModelForm
from django.forms.models import BaseModelFormSet, modelformset_factory, save_instance
from django.contrib.admin.options import InlineModelAdmin, flatten_fieldsets
from django.utils.encoding import smart_unicode
from django.utils.functional import curry
from django.contrib.contenttypes.models import ContentType
class GenericForeignKey(object):
"""
Provides a generic relation to any object through content-type/object-id
fields.
"""
def __init__(self, ct_field="content_type", fk_field="object_id"):
self.ct_field = ct_field
self.fk_field = fk_field
def contribute_to_class(self, cls, name):
self.name = name
self.model = cls
self.cache_attr = "_%s_cache" % name
cls._meta.add_virtual_field(self)
# For some reason I don't totally understand, using weakrefs here doesn't work.
signals.pre_init.connect(self.instance_pre_init, sender=cls, weak=False)
# Connect myself as the descriptor for this field
setattr(cls, name, self)
def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs):
"""
Handles initializing an object with the generic FK instead of
content-type/object-id fields.
"""
if self.name in kwargs:
value = kwargs.pop(self.name)
kwargs[self.ct_field] = self.get_content_type(obj=value)
kwargs[self.fk_field] = value._get_pk_val()
def get_content_type(self, obj=None, id=None, using=None):
# Convenience function using get_model avoids a circular import when
# using this model
ContentType = get_model("contenttypes", "contenttype")
if obj:
return ContentType.objects.db_manager(obj._state.db).get_for_model(obj)
elif id:
return ContentType.objects.db_manager(using).get_for_id(id)
else:
# This should never happen. I love comments like this, don't you?
raise Exception("Impossible arguments to GFK.get_content_type!")
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
return getattr(instance, self.cache_attr)
except AttributeError:
rel_obj = None
# Make sure to use ContentType.objects.get_for_id() to ensure that
# lookups are cached (see ticket #5570). This takes more code than
# the naive ``getattr(instance, self.ct_field)``, but has better
# performance when dealing with GFKs in loops and such.
f = self.model._meta.get_field(self.ct_field)
ct_id = getattr(instance, f.get_attname(), None)
if ct_id:
ct = self.get_content_type(id=ct_id, using=instance._state.db)
try:
rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field))
except ObjectDoesNotExist:
pass
setattr(instance, self.cache_attr, rel_obj)
return rel_obj
def __set__(self, instance, value):
if instance is None:
raise AttributeError(u"%s must be accessed via instance" % self.related.opts.object_name)
ct = None
fk = None
if value is not None:
ct = self.get_content_type(obj=value)
fk = value._get_pk_val()
setattr(instance, self.ct_field, ct)
setattr(instance, self.fk_field, fk)
setattr(instance, self.cache_attr, value)
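# Illustrative model using GenericForeignKey (a sketch, not part of this
# module; 'TaggedItem' is a hypothetical name):
# class TaggedItem(models.Model):
#     content_type = models.ForeignKey(ContentType)
#     object_id = models.PositiveIntegerField()
#     content_object = GenericForeignKey('content_type', 'object_id')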
class GenericRelation(RelatedField, Field):
"""Provides an accessor to generic related objects (e.g. comments)"""
def __init__(self, to, **kwargs):
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
kwargs['rel'] = GenericRel(to,
related_name=kwargs.pop('related_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
symmetrical=kwargs.pop('symmetrical', True))
# Override content-type/object-id field names on the related class
self.object_id_field_name = kwargs.pop("object_id_field", "object_id")
self.content_type_field_name = kwargs.pop("content_type_field", "content_type")
kwargs['blank'] = True
kwargs['editable'] = False
kwargs['serialize'] = False
Field.__init__(self, **kwargs)
def get_choices_default(self):
return Field.get_choices(self, include_blank=False)
def value_to_string(self, obj):
qs = getattr(obj, self.name).all()
return smart_unicode([instance._get_pk_val() for instance in qs])
def m2m_db_table(self):
return self.rel.to._meta.db_table
def m2m_column_name(self):
return self.object_id_field_name
def m2m_reverse_name(self):
return self.rel.to._meta.pk.column
def m2m_target_field_name(self):
return self.model._meta.pk.name
def m2m_reverse_target_field_name(self):
return self.rel.to._meta.pk.name
def contribute_to_class(self, cls, name):
super(GenericRelation, self).contribute_to_class(cls, name)
# Save a reference to which model this class is on for future use
self.model = cls
# Add the descriptor for the m2m relation
setattr(cls, self.name, ReverseGenericRelatedObjectsDescriptor(self))
def contribute_to_related_class(self, cls, related):
pass
def set_attributes_from_rel(self):
pass
def get_internal_type(self):
return "ManyToManyField"
def db_type(self, connection):
# Since we're simulating a ManyToManyField, in effect, best return the
# same db_type as well.
return None
def extra_filters(self, pieces, pos, negate):
"""
Return an extra filter to the queryset so that the results are filtered
on the appropriate content type.
"""
if negate:
return []
ContentType = get_model("contenttypes", "contenttype")
content_type = ContentType.objects.get_for_model(self.model)
prefix = "__".join(pieces[:pos + 1])
return [("%s__%s" % (prefix, self.content_type_field_name),
content_type)]
def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
"""
Return all objects related to ``objs`` via this ``GenericRelation``.
"""
return self.rel.to._base_manager.db_manager(using).filter(**{
"%s__pk" % self.content_type_field_name:
ContentType.objects.db_manager(using).get_for_model(self.model).pk,
"%s__in" % self.object_id_field_name:
[obj.pk for obj in objs]
})
class ReverseGenericRelatedObjectsDescriptor(object):
"""
This class provides the functionality that makes the related-object
managers available as attributes on a model class, for fields that have
multiple "remote" values and have a GenericRelation defined in their model
(rather than having another model pointed *at* them). In the example
"article.publications", the publications attribute is a
ReverseGenericRelatedObjectsDescriptor instance.
"""
def __init__(self, field):
self.field = field
def __get__(self, instance, instance_type=None):
if instance is None:
return self
# This import is done here to avoid circular import importing this module
from django.contrib.contenttypes.models import ContentType
# Dynamically create a class that subclasses the related model's
# default manager.
rel_model = self.field.rel.to
superclass = rel_model._default_manager.__class__
RelatedManager = create_generic_related_manager(superclass)
qn = connection.ops.quote_name
manager = RelatedManager(
model = rel_model,
instance = instance,
symmetrical = (self.field.rel.symmetrical and instance.__class__ == rel_model),
join_table = qn(self.field.m2m_db_table()),
source_col_name = qn(self.field.m2m_column_name()),
target_col_name = qn(self.field.m2m_reverse_name()),
content_type = ContentType.objects.db_manager(instance._state.db).get_for_model(instance),
content_type_field_name = self.field.content_type_field_name,
object_id_field_name = self.field.object_id_field_name
)
return manager
def __set__(self, instance, value):
if instance is None:
raise AttributeError("Manager must be accessed via instance")
manager = self.__get__(instance)
manager.clear()
for obj in value:
manager.add(obj)
def create_generic_related_manager(superclass):
"""
Factory function for a manager that subclasses 'superclass' (which is a
Manager) and adds behavior for generic related objects.
"""
class GenericRelatedObjectManager(superclass):
def __init__(self, model=None, core_filters=None, instance=None, symmetrical=None,
join_table=None, source_col_name=None, target_col_name=None, content_type=None,
content_type_field_name=None, object_id_field_name=None):
super(GenericRelatedObjectManager, self).__init__()
self.core_filters = core_filters or {}
self.model = model
self.content_type = content_type
self.symmetrical = symmetrical
self.instance = instance
self.join_table = join_table
self.join_table = model._meta.db_table
self.source_col_name = source_col_name
self.target_col_name = target_col_name
self.content_type_field_name = content_type_field_name
self.object_id_field_name = object_id_field_name
self.pk_val = self.instance._get_pk_val()
def get_query_set(self):
db = self._db or router.db_for_read(self.model, instance=self.instance)
query = {
'%s__pk' % self.content_type_field_name : self.content_type.id,
'%s__exact' % self.object_id_field_name : self.pk_val,
}
return superclass.get_query_set(self).using(db).filter(**query)
def add(self, *objs):
for obj in objs:
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected" % self.model._meta.object_name)
setattr(obj, self.content_type_field_name, self.content_type)
setattr(obj, self.object_id_field_name, self.pk_val)
obj.save()
add.alters_data = True
def remove(self, *objs):
db = router.db_for_write(self.model, instance=self.instance)
for obj in objs:
obj.delete(using=db)
remove.alters_data = True
def clear(self):
db = router.db_for_write(self.model, instance=self.instance)
for obj in self.all():
obj.delete(using=db)
clear.alters_data = True
def create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).using(db).create(**kwargs)
create.alters_data = True
return GenericRelatedObjectManager
class GenericRel(ManyToManyRel):
def __init__(self, to, related_name=None, limit_choices_to=None, symmetrical=True):
self.to = to
self.related_name = related_name
self.limit_choices_to = limit_choices_to or {}
self.symmetrical = symmetrical
self.multiple = True
self.through = None
class BaseGenericInlineFormSet(BaseModelFormSet):
"""
A formset for generic inline objects to a parent.
"""
def __init__(self, data=None, files=None, instance=None, save_as_new=None,
prefix=None, queryset=None):
# Avoid a circular import.
from django.contrib.contenttypes.models import ContentType
opts = self.model._meta
self.instance = instance
self.rel_name = '-'.join((
opts.app_label, opts.object_name.lower(),
self.ct_field.name, self.ct_fk_field.name,
))
if self.instance is None or self.instance.pk is None:
qs = self.model._default_manager.none()
else:
if queryset is None:
queryset = self.model._default_manager
qs = queryset.filter(**{
self.ct_field.name: ContentType.objects.get_for_model(self.instance),
self.ct_fk_field.name: self.instance.pk,
})
super(BaseGenericInlineFormSet, self).__init__(
queryset=qs, data=data, files=files,
prefix=prefix
)
#@classmethod
def get_default_prefix(cls):
opts = cls.model._meta
return '-'.join((opts.app_label, opts.object_name.lower(),
cls.ct_field.name, cls.ct_fk_field.name,
))
get_default_prefix = classmethod(get_default_prefix)
def save_new(self, form, commit=True):
# Avoid a circular import.
from django.contrib.contenttypes.models import ContentType
kwargs = {
self.ct_field.get_attname(): ContentType.objects.get_for_model(self.instance).pk,
self.ct_fk_field.get_attname(): self.instance.pk,
}
new_obj = self.model(**kwargs)
return save_instance(form, new_obj, commit=commit)
def generic_inlineformset_factory(model, form=ModelForm,
formset=BaseGenericInlineFormSet,
ct_field="content_type", fk_field="object_id",
fields=None, exclude=None,
extra=3, can_order=False, can_delete=True,
max_num=None,
formfield_callback=lambda f: f.formfield()):
"""
Returns a ``GenericInlineFormSet`` for the given kwargs.
You must provide ``ct_field`` and ``fk_field`` if they are different from
the defaults ``content_type`` and ``object_id`` respectively.
"""
opts = model._meta
# Avoid a circular import.
from django.contrib.contenttypes.models import ContentType
# if there is no field called `ct_field` let the exception propagate
ct_field = opts.get_field(ct_field)
if not isinstance(ct_field, models.ForeignKey) or ct_field.rel.to != ContentType:
raise Exception("fk_name '%s' is not a ForeignKey to ContentType" % ct_field)
fk_field = opts.get_field(fk_field) # let the exception propagate
if exclude is not None:
exclude = list(exclude)
exclude.extend([ct_field.name, fk_field.name])
else:
exclude = [ct_field.name, fk_field.name]
FormSet = modelformset_factory(model, form=form,
formfield_callback=formfield_callback,
formset=formset,
extra=extra, can_delete=can_delete, can_order=can_order,
fields=fields, exclude=exclude, max_num=max_num)
FormSet.ct_field = ct_field
FormSet.ct_fk_field = fk_field
return FormSet
class GenericInlineModelAdmin(InlineModelAdmin):
ct_field = "content_type"
ct_fk_field = "object_id"
formset = BaseGenericInlineFormSet
def get_formset(self, request, obj=None):
if self.declared_fieldsets:
fields = flatten_fieldsets(self.declared_fieldsets)
else:
fields = None
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
exclude.extend(self.get_readonly_fields(request, obj))
exclude = exclude or None
defaults = {
"ct_field": self.ct_field,
"fk_field": self.ct_fk_field,
"form": self.form,
"formfield_callback": curry(self.formfield_for_dbfield, request=request),
"formset": self.formset,
"extra": self.extra,
"can_delete": self.can_delete,
"can_order": False,
"fields": fields,
"max_num": self.max_num,
"exclude": exclude
}
return generic_inlineformset_factory(self.model, **defaults)
class GenericStackedInline(GenericInlineModelAdmin):
template = 'admin/edit_inline/stacked.html'
class GenericTabularInline(GenericInlineModelAdmin):
template = 'admin/edit_inline/tabular.html'
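# Illustrative admin wiring (a sketch; reuses the hypothetical TaggedItem
# model from the GenericForeignKey example above):
# class TaggedItemInline(GenericTabularInline):
#     model = TaggedItem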
|
Arabidopsis-Information-Portal/PMR_API
|
refs/heads/master
|
services/boxplot_api/service.py
|
1
|
# PMR WebServices
# Copyright (C) 2016 Manhoi Hur, Belyaeva, Irina
# This file is part of PMR WebServices API.
#
# PMR API is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# PMR API is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with PMR API. If not, see <http://www.gnu.org/licenses/>.
"""
Returns base urls for the underlying endpoints
"""
import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
# This function returns the boxplot base url
def get_boxplot_base_url():
"""Return the boxplot base url
:rtype: string
:return: Returns the boxplot base url
"""
return 'http://pmr-webapi.gdcb.iastate.edu/pmrWebApi/api/v1/boxplot/list'
# This function returns the metabolites base url
def get_list_base_url():
"""Return the metabolite list base url
:rtype: string
:return: Returns the metabolite list base url
"""
return 'http://pmr-webapi.gdcb.iastate.edu/pmrWebApi/api/v1/metabolites/list'
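# Illustrative downstream use (a sketch; 'speciesId' is a hypothetical query
# parameter, not defined in this file):
# url = '%s?speciesId=%s' % (get_boxplot_base_url(), species_id)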
|
queenp/wakatime
|
refs/heads/master
|
wakatime/packages/requests/packages/chardet/euckrfreq.py
|
3120
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampled from about 20M of text material, including literature and computer technology
# 128 --> 0.79
# 256 --> 0.92
# 512 --> 0.986
# 1024 --> 0.99944
# 2048 --> 0.99999
#
# Ideal Distribution Ratio = 0.98653 / (1-0.98653) = 73.24
# Random Distribution Ratio = 512 / (2350-512) = 0.279.
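# Editor's note (worked example, not in the original chardet source): both
# ratios follow from the sampling figures above -- the 512 most frequent
# characters cover 0.98653 of the sample, so
#     ideal  = 0.98653 / (1 - 0.98653)  ~= 73.24
#     random = 512.0 / (2350 - 512)     ~= 0.279
# EUCKR_TYPICAL_DISTRIBUTION_RATIO below is a deliberately conservative
# threshold chosen between those two extremes.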
#
# Typical Distribution Ratio
EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0
EUCKR_TABLE_SIZE = 2352
# Char to FreqOrder table
EUCKRCharToFreqOrder = ( \
13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87,
1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398,
1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734,
945,1400,1735, 47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739,
116, 987, 813,1401, 683, 75,1204, 145,1740,1741,1742,1743, 16, 847, 667, 622,
708,1744,1745,1746, 966, 787, 304, 129,1747, 60, 820, 123, 676,1748,1749,1750,
1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856,
344,1763,1764,1765,1766, 89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205,
709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779,
1780, 337, 751,1058, 28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782, 19,
1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567,
1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797,
1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802,
1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899,
885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818,
1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409,
1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697,
1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770,
1412,1837,1838, 39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723,
544,1023,1081, 869, 91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416,
1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300,
119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083,
893,1853,1854,1855, 367, 809, 487, 671,1856, 663,1857,1858, 956, 471, 306, 857,
1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871,
282, 96, 574,1872, 502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420,
1421, 268,1877,1422,1878,1879,1880, 308,1881, 2, 537,1882,1883,1215,1884,1885,
127, 791,1886,1273,1423,1887, 34, 336, 404, 643,1888, 571, 654, 894, 840,1889,
0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893,
1894,1123, 48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317,
1899, 694,1900, 909, 734,1424, 572, 866,1425, 691, 85, 524,1010, 543, 394, 841,
1901,1902,1903,1026,1904,1905,1906,1907,1908,1909, 30, 451, 651, 988, 310,1910,
1911,1426, 810,1216, 93,1912,1913,1277,1217,1914, 858, 759, 45, 58, 181, 610,
269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375,
1919, 359,1920, 687,1921, 822,1922, 293,1923,1924, 40, 662, 118, 692, 29, 939,
887, 640, 482, 174,1925, 69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870,
217, 854,1163, 823,1927,1928,1929,1930, 834,1931, 78,1932, 859,1933,1063,1934,
1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888,
1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950,
1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065,
1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002,
1283,1222,1960,1961,1962,1963, 36, 383, 228, 753, 247, 454,1964, 876, 678,1965,
1966,1284, 126, 464, 490, 835, 136, 672, 529, 940,1088,1435, 473,1967,1968, 467,
50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285,
639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971, 7,
103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979,
1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985,
818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994,
1995, 560, 223,1287, 98, 8, 189, 650, 978,1288,1996,1437,1997, 17, 345, 250,
423, 277, 234, 512, 226, 97, 289, 42, 167,1998, 201,1999,2000, 843, 836, 824,
532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003,
2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008, 71,1440, 745,
619, 688,2009, 829,2010,2011, 147,2012, 33, 948,2013,2014, 74, 224,2015, 61,
191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023,
2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591, 52, 724, 246,2031,2032,
2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912,
2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224,
719,1170, 959, 440, 437, 534, 84, 388, 480,1131, 159, 220, 198, 679,2044,1012,
819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050,
2051,2052,2053, 59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681,
499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414,
1444,2064,2065, 41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068,
2069,1292,2070,2071,1445,2072,1446,2073,2074, 55, 588, 66,1447, 271,1092,2075,
1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850,
2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606,
2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449,
1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452,
949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112,
2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121,
2122,2123,2124, 720, 701,1297, 992,1457, 927,1004,2125,2126,2127,2128,2129,2130,
22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174, 73,1096, 231, 274,
962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139,
2141,2142,2143,2144, 11, 374, 844,2145, 154,1232, 46,1461,2146, 838, 830, 721,
1233, 106,2147, 90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298,
2150,1462, 761, 565,2151, 686,2152, 649,2153, 72, 173,2154, 460, 415,2155,1463,
2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747,
2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177, 23, 530, 285,
2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187,
2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193, 10,
2194, 613, 424,2195, 979, 108, 449, 589, 27, 172, 81,1031, 80, 774, 281, 350,
1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201,
2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972,
2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219,
2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233,
2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242,
2243, 521, 486, 548,2244,2245,2246,1473,1300, 53, 549, 137, 875, 76, 158,2247,
1301,1474, 469, 396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178,
1475,2249, 82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255,
2256, 18, 450, 206,2257, 290, 292,1142,2258, 511, 162, 99, 346, 164, 735,2259,
1476,1477, 4, 554, 343, 798,1099,2260,1100,2261, 43, 171,1303, 139, 215,2262,
2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702,
1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272, 67,2273,
295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541,
2282,2283,2284,2285,2286, 70, 852,1071,2287,2288,2289,2290, 21, 56, 509, 117,
432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187,
2294,1046,1479,2295, 340,2296, 63,1047, 230,2297,2298,1305, 763,1306, 101, 800,
808, 494,2299,2300,2301, 903,2302, 37,1072, 14, 5,2303, 79, 675,2304, 312,
2305,2306,2307,2308,2309,1480, 6,1307,2310,2311,2312, 1, 470, 35, 24, 229,
2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964,2314, 259,2315,
501, 380,2316,2317, 83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484,
2320,2321,2322,2323,2324,2325,1485,2326,2327, 128, 57, 68, 261,1048, 211, 170,
1240, 31,2328, 51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335,
425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601,
1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395,
2351,1490,1491, 62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354,
1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476,
2361,2362, 332, 12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035,
416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498,
2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310,
1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389,
2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504,
1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505,
2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145,
1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624,
593, 921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700,
2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221,
2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377,
644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448,
915, 489,2449,1514,1184,2450,2451, 515, 64, 427, 495,2452, 583,2453, 483, 485,
1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705,
1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465,
291,2466,1318,1105,2467,1517, 365,2468, 355, 951,1244,2469,1319,2470, 631,2471,
2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997,
2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 684,2485,2486,
797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187, 65,2494,
434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771,
585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323,
2499,2500, 49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491,
95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510,
161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519,
2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532,
2533, 25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199,
704, 504, 468, 758, 657,1528, 196, 44, 839,1246, 272, 750,2543, 765, 862,2544,
2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247,
1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441,
249,1075,2556,2557,2558, 466, 743,2559,2560,2561, 92, 514, 426, 420, 526,2562,
2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362,
2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583,
2584,1532, 54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465,
3, 458, 9, 38,2588, 107, 110, 890, 209, 26, 737, 498,2589,1534,2590, 431,
202, 88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151,
974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596,
2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601, 94, 175, 197, 406,
2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611,
2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619,
1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628,
2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042,
670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256
#Everything below is of no interest for detection purpose
2643,2644,2645,2646,2647,2648,2649,2650,2651,2652,2653,2654,2655,2656,2657,2658,
2659,2660,2661,2662,2663,2664,2665,2666,2667,2668,2669,2670,2671,2672,2673,2674,
2675,2676,2677,2678,2679,2680,2681,2682,2683,2684,2685,2686,2687,2688,2689,2690,
2691,2692,2693,2694,2695,2696,2697,2698,2699,1542, 880,2700,2701,2702,2703,2704,
2705,2706,2707,2708,2709,2710,2711,2712,2713,2714,2715,2716,2717,2718,2719,2720,
2721,2722,2723,2724,2725,1543,2726,2727,2728,2729,2730,2731,2732,1544,2733,2734,
2735,2736,2737,2738,2739,2740,2741,2742,2743,2744,2745,2746,2747,2748,2749,2750,
2751,2752,2753,2754,1545,2755,2756,2757,2758,2759,2760,2761,2762,2763,2764,2765,
2766,1546,2767,1547,2768,2769,2770,2771,2772,2773,2774,2775,2776,2777,2778,2779,
2780,2781,2782,2783,2784,2785,2786,1548,2787,2788,2789,1109,2790,2791,2792,2793,
2794,2795,2796,2797,2798,2799,2800,2801,2802,2803,2804,2805,2806,2807,2808,2809,
2810,2811,2812,1329,2813,2814,2815,2816,2817,2818,2819,2820,2821,2822,2823,2824,
2825,2826,2827,2828,2829,2830,2831,2832,2833,2834,2835,2836,2837,2838,2839,2840,
2841,2842,2843,2844,2845,2846,2847,2848,2849,2850,2851,2852,2853,2854,2855,2856,
1549,2857,2858,2859,2860,1550,2861,2862,1551,2863,2864,2865,2866,2867,2868,2869,
2870,2871,2872,2873,2874,1110,1330,2875,2876,2877,2878,2879,2880,2881,2882,2883,
2884,2885,2886,2887,2888,2889,2890,2891,2892,2893,2894,2895,2896,2897,2898,2899,
2900,2901,2902,2903,2904,2905,2906,2907,2908,2909,2910,2911,2912,2913,2914,2915,
2916,2917,2918,2919,2920,2921,2922,2923,2924,2925,2926,2927,2928,2929,2930,1331,
2931,2932,2933,2934,2935,2936,2937,2938,2939,2940,2941,2942,2943,1552,2944,2945,
2946,2947,2948,2949,2950,2951,2952,2953,2954,2955,2956,2957,2958,2959,2960,2961,
2962,2963,2964,1252,2965,2966,2967,2968,2969,2970,2971,2972,2973,2974,2975,2976,
2977,2978,2979,2980,2981,2982,2983,2984,2985,2986,2987,2988,2989,2990,2991,2992,
2993,2994,2995,2996,2997,2998,2999,3000,3001,3002,3003,3004,3005,3006,3007,3008,
3009,3010,3011,3012,1553,3013,3014,3015,3016,3017,1554,3018,1332,3019,3020,3021,
3022,3023,3024,3025,3026,3027,3028,3029,3030,3031,3032,3033,3034,3035,3036,3037,
3038,3039,3040,3041,3042,3043,3044,3045,3046,3047,3048,3049,3050,1555,3051,3052,
3053,1556,1557,3054,3055,3056,3057,3058,3059,3060,3061,3062,3063,3064,3065,3066,
3067,1558,3068,3069,3070,3071,3072,3073,3074,3075,3076,1559,3077,3078,3079,3080,
3081,3082,3083,1253,3084,3085,3086,3087,3088,3089,3090,3091,3092,3093,3094,3095,
3096,3097,3098,3099,3100,3101,3102,3103,3104,3105,3106,3107,3108,1152,3109,3110,
3111,3112,3113,1560,3114,3115,3116,3117,1111,3118,3119,3120,3121,3122,3123,3124,
3125,3126,3127,3128,3129,3130,3131,3132,3133,3134,3135,3136,3137,3138,3139,3140,
3141,3142,3143,3144,3145,3146,3147,3148,3149,3150,3151,3152,3153,3154,3155,3156,
3157,3158,3159,3160,3161,3162,3163,3164,3165,3166,3167,3168,3169,3170,3171,3172,
3173,3174,3175,3176,1333,3177,3178,3179,3180,3181,3182,3183,3184,3185,3186,3187,
3188,3189,1561,3190,3191,1334,3192,3193,3194,3195,3196,3197,3198,3199,3200,3201,
3202,3203,3204,3205,3206,3207,3208,3209,3210,3211,3212,3213,3214,3215,3216,3217,
3218,3219,3220,3221,3222,3223,3224,3225,3226,3227,3228,3229,3230,3231,3232,3233,
3234,1562,3235,3236,3237,3238,3239,3240,3241,3242,3243,3244,3245,3246,3247,3248,
3249,3250,3251,3252,3253,3254,3255,3256,3257,3258,3259,3260,3261,3262,3263,3264,
3265,3266,3267,3268,3269,3270,3271,3272,3273,3274,3275,3276,3277,1563,3278,3279,
3280,3281,3282,3283,3284,3285,3286,3287,3288,3289,3290,3291,3292,3293,3294,3295,
3296,3297,3298,3299,3300,3301,3302,3303,3304,3305,3306,3307,3308,3309,3310,3311,
3312,3313,3314,3315,3316,3317,3318,3319,3320,3321,3322,3323,3324,3325,3326,3327,
3328,3329,3330,3331,3332,3333,3334,3335,3336,3337,3338,3339,3340,3341,3342,3343,
3344,3345,3346,3347,3348,3349,3350,3351,3352,3353,3354,3355,3356,3357,3358,3359,
3360,3361,3362,3363,3364,1335,3365,3366,3367,3368,3369,3370,3371,3372,3373,3374,
3375,3376,3377,3378,3379,3380,3381,3382,3383,3384,3385,3386,3387,1336,3388,3389,
3390,3391,3392,3393,3394,3395,3396,3397,3398,3399,3400,3401,3402,3403,3404,3405,
3406,3407,3408,3409,3410,3411,3412,3413,3414,1337,3415,3416,3417,3418,3419,1338,
3420,3421,3422,1564,1565,3423,3424,3425,3426,3427,3428,3429,3430,3431,1254,3432,
3433,3434,1339,3435,3436,3437,3438,3439,1566,3440,3441,3442,3443,3444,3445,3446,
3447,3448,3449,3450,3451,3452,3453,3454,1255,3455,3456,3457,3458,3459,1567,1191,
3460,1568,1569,3461,3462,3463,1570,3464,3465,3466,3467,3468,1571,3469,3470,3471,
3472,3473,1572,3474,3475,3476,3477,3478,3479,3480,3481,3482,3483,3484,3485,3486,
1340,3487,3488,3489,3490,3491,3492,1021,3493,3494,3495,3496,3497,3498,1573,3499,
1341,3500,3501,3502,3503,3504,3505,3506,3507,3508,3509,3510,3511,1342,3512,3513,
3514,3515,3516,1574,1343,3517,3518,3519,1575,3520,1576,3521,3522,3523,3524,3525,
3526,3527,3528,3529,3530,3531,3532,3533,3534,3535,3536,3537,3538,3539,3540,3541,
3542,3543,3544,3545,3546,3547,3548,3549,3550,3551,3552,3553,3554,3555,3556,3557,
3558,3559,3560,3561,3562,3563,3564,3565,3566,3567,3568,3569,3570,3571,3572,3573,
3574,3575,3576,3577,3578,3579,3580,1577,3581,3582,1578,3583,3584,3585,3586,3587,
3588,3589,3590,3591,3592,3593,3594,3595,3596,3597,3598,3599,3600,3601,3602,3603,
3604,1579,3605,3606,3607,3608,3609,3610,3611,3612,3613,3614,3615,3616,3617,3618,
3619,3620,3621,3622,3623,3624,3625,3626,3627,3628,3629,1580,3630,3631,1581,3632,
3633,3634,3635,3636,3637,3638,3639,3640,3641,3642,3643,3644,3645,3646,3647,3648,
3649,3650,3651,3652,3653,3654,3655,3656,1582,3657,3658,3659,3660,3661,3662,3663,
3664,3665,3666,3667,3668,3669,3670,3671,3672,3673,3674,3675,3676,3677,3678,3679,
3680,3681,3682,3683,3684,3685,3686,3687,3688,3689,3690,3691,3692,3693,3694,3695,
3696,3697,3698,3699,3700,1192,3701,3702,3703,3704,1256,3705,3706,3707,3708,1583,
1257,3709,3710,3711,3712,3713,3714,3715,3716,1584,3717,3718,3719,3720,3721,3722,
3723,3724,3725,3726,3727,3728,3729,3730,3731,3732,3733,3734,3735,3736,3737,3738,
3739,3740,3741,3742,3743,3744,3745,1344,3746,3747,3748,3749,3750,3751,3752,3753,
3754,3755,3756,1585,3757,3758,3759,3760,3761,3762,3763,3764,3765,3766,1586,3767,
3768,3769,3770,3771,3772,3773,3774,3775,3776,3777,3778,1345,3779,3780,3781,3782,
3783,3784,3785,3786,3787,3788,3789,3790,3791,3792,3793,3794,3795,1346,1587,3796,
3797,1588,3798,3799,3800,3801,3802,3803,3804,3805,3806,1347,3807,3808,3809,3810,
3811,1589,3812,3813,3814,3815,3816,3817,3818,3819,3820,3821,1590,3822,3823,1591,
1348,3824,3825,3826,3827,3828,3829,3830,1592,3831,3832,1593,3833,3834,3835,3836,
3837,3838,3839,3840,3841,3842,3843,3844,1349,3845,3846,3847,3848,3849,3850,3851,
3852,3853,3854,3855,3856,3857,3858,1594,3859,3860,3861,3862,3863,3864,3865,3866,
3867,3868,3869,1595,3870,3871,3872,3873,1596,3874,3875,3876,3877,3878,3879,3880,
3881,3882,3883,3884,3885,3886,1597,3887,3888,3889,3890,3891,3892,3893,3894,3895,
1598,3896,3897,3898,1599,1600,3899,1350,3900,1351,3901,3902,1352,3903,3904,3905,
3906,3907,3908,3909,3910,3911,3912,3913,3914,3915,3916,3917,3918,3919,3920,3921,
3922,3923,3924,1258,3925,3926,3927,3928,3929,3930,3931,1193,3932,1601,3933,3934,
3935,3936,3937,3938,3939,3940,3941,3942,3943,1602,3944,3945,3946,3947,3948,1603,
3949,3950,3951,3952,3953,3954,3955,3956,3957,3958,3959,3960,3961,3962,3963,3964,
3965,1604,3966,3967,3968,3969,3970,3971,3972,3973,3974,3975,3976,3977,1353,3978,
3979,3980,3981,3982,3983,3984,3985,3986,3987,3988,3989,3990,3991,1354,3992,3993,
3994,3995,3996,3997,3998,3999,4000,4001,4002,4003,4004,4005,4006,4007,4008,4009,
4010,4011,4012,4013,4014,4015,4016,4017,4018,4019,4020,4021,4022,4023,1355,4024,
4025,4026,4027,4028,4029,4030,4031,4032,4033,4034,4035,4036,4037,4038,4039,4040,
1605,4041,4042,4043,4044,4045,4046,4047,4048,4049,4050,4051,4052,4053,4054,4055,
4056,4057,4058,4059,4060,1606,4061,4062,4063,4064,1607,4065,4066,4067,4068,4069,
4070,4071,4072,4073,4074,4075,4076,1194,4077,4078,1608,4079,4080,4081,4082,4083,
4084,4085,4086,4087,1609,4088,4089,4090,4091,4092,4093,4094,4095,4096,4097,4098,
4099,4100,4101,4102,4103,4104,4105,4106,4107,4108,1259,4109,4110,4111,4112,4113,
4114,4115,4116,4117,4118,4119,4120,4121,4122,4123,4124,1195,4125,4126,4127,1610,
4128,4129,4130,4131,4132,4133,4134,4135,4136,4137,1356,4138,4139,4140,4141,4142,
4143,4144,1611,4145,4146,4147,4148,4149,4150,4151,4152,4153,4154,4155,4156,4157,
4158,4159,4160,4161,4162,4163,4164,4165,4166,4167,4168,4169,4170,4171,4172,4173,
4174,4175,4176,4177,4178,4179,4180,4181,4182,4183,4184,4185,4186,4187,4188,4189,
4190,4191,4192,4193,4194,4195,4196,4197,4198,4199,4200,4201,4202,4203,4204,4205,
4206,4207,4208,4209,4210,4211,4212,4213,4214,4215,4216,4217,4218,4219,1612,4220,
4221,4222,4223,4224,4225,4226,4227,1357,4228,1613,4229,4230,4231,4232,4233,4234,
4235,4236,4237,4238,4239,4240,4241,4242,4243,1614,4244,4245,4246,4247,4248,4249,
4250,4251,4252,4253,4254,4255,4256,4257,4258,4259,4260,4261,4262,4263,4264,4265,
4266,4267,4268,4269,4270,1196,1358,4271,4272,4273,4274,4275,4276,4277,4278,4279,
4280,4281,4282,4283,4284,4285,4286,4287,1615,4288,4289,4290,4291,4292,4293,4294,
4295,4296,4297,4298,4299,4300,4301,4302,4303,4304,4305,4306,4307,4308,4309,4310,
4311,4312,4313,4314,4315,4316,4317,4318,4319,4320,4321,4322,4323,4324,4325,4326,
4327,4328,4329,4330,4331,4332,4333,4334,1616,4335,4336,4337,4338,4339,4340,4341,
4342,4343,4344,4345,4346,4347,4348,4349,4350,4351,4352,4353,4354,4355,4356,4357,
4358,4359,4360,1617,4361,4362,4363,4364,4365,1618,4366,4367,4368,4369,4370,4371,
4372,4373,4374,4375,4376,4377,4378,4379,4380,4381,4382,4383,4384,4385,4386,4387,
4388,4389,4390,4391,4392,4393,4394,4395,4396,4397,4398,4399,4400,4401,4402,4403,
4404,4405,4406,4407,4408,4409,4410,4411,4412,4413,4414,4415,4416,1619,4417,4418,
4419,4420,4421,4422,4423,4424,4425,1112,4426,4427,4428,4429,4430,1620,4431,4432,
4433,4434,4435,4436,4437,4438,4439,4440,4441,4442,1260,1261,4443,4444,4445,4446,
4447,4448,4449,4450,4451,4452,4453,4454,4455,1359,4456,4457,4458,4459,4460,4461,
4462,4463,4464,4465,1621,4466,4467,4468,4469,4470,4471,4472,4473,4474,4475,4476,
4477,4478,4479,4480,4481,4482,4483,4484,4485,4486,4487,4488,4489,1055,4490,4491,
4492,4493,4494,4495,4496,4497,4498,4499,4500,4501,4502,4503,4504,4505,4506,4507,
4508,4509,4510,4511,4512,4513,4514,4515,4516,4517,4518,1622,4519,4520,4521,1623,
4522,4523,4524,4525,4526,4527,4528,4529,4530,4531,4532,4533,4534,4535,1360,4536,
4537,4538,4539,4540,4541,4542,4543, 975,4544,4545,4546,4547,4548,4549,4550,4551,
4552,4553,4554,4555,4556,4557,4558,4559,4560,4561,4562,4563,4564,4565,4566,4567,
4568,4569,4570,4571,1624,4572,4573,4574,4575,4576,1625,4577,4578,4579,4580,4581,
4582,4583,4584,1626,4585,4586,4587,4588,4589,4590,4591,4592,4593,4594,4595,1627,
4596,4597,4598,4599,4600,4601,4602,4603,4604,4605,4606,4607,4608,4609,4610,4611,
4612,4613,4614,4615,1628,4616,4617,4618,4619,4620,4621,4622,4623,4624,4625,4626,
4627,4628,4629,4630,4631,4632,4633,4634,4635,4636,4637,4638,4639,4640,4641,4642,
4643,4644,4645,4646,4647,4648,4649,1361,4650,4651,4652,4653,4654,4655,4656,4657,
4658,4659,4660,4661,1362,4662,4663,4664,4665,4666,4667,4668,4669,4670,4671,4672,
4673,4674,4675,4676,4677,4678,4679,4680,4681,4682,1629,4683,4684,4685,4686,4687,
1630,4688,4689,4690,4691,1153,4692,4693,4694,1113,4695,4696,4697,4698,4699,4700,
4701,4702,4703,4704,4705,4706,4707,4708,4709,4710,4711,1197,4712,4713,4714,4715,
4716,4717,4718,4719,4720,4721,4722,4723,4724,4725,4726,4727,4728,4729,4730,4731,
4732,4733,4734,4735,1631,4736,1632,4737,4738,4739,4740,4741,4742,4743,4744,1633,
4745,4746,4747,4748,4749,1262,4750,4751,4752,4753,4754,1363,4755,4756,4757,4758,
4759,4760,4761,4762,4763,4764,4765,4766,4767,4768,1634,4769,4770,4771,4772,4773,
4774,4775,4776,4777,4778,1635,4779,4780,4781,4782,4783,4784,4785,4786,4787,4788,
4789,1636,4790,4791,4792,4793,4794,4795,4796,4797,4798,4799,4800,4801,4802,4803,
4804,4805,4806,1637,4807,4808,4809,1638,4810,4811,4812,4813,4814,4815,4816,4817,
4818,1639,4819,4820,4821,4822,4823,4824,4825,4826,4827,4828,4829,4830,4831,4832,
4833,1077,4834,4835,4836,4837,4838,4839,4840,4841,4842,4843,4844,4845,4846,4847,
4848,4849,4850,4851,4852,4853,4854,4855,4856,4857,4858,4859,4860,4861,4862,4863,
4864,4865,4866,4867,4868,4869,4870,4871,4872,4873,4874,4875,4876,4877,4878,4879,
4880,4881,4882,4883,1640,4884,4885,1641,4886,4887,4888,4889,4890,4891,4892,4893,
4894,4895,4896,4897,4898,4899,4900,4901,4902,4903,4904,4905,4906,4907,4908,4909,
4910,4911,1642,4912,4913,4914,1364,4915,4916,4917,4918,4919,4920,4921,4922,4923,
4924,4925,4926,4927,4928,4929,4930,4931,1643,4932,4933,4934,4935,4936,4937,4938,
4939,4940,4941,4942,4943,4944,4945,4946,4947,4948,4949,4950,4951,4952,4953,4954,
4955,4956,4957,4958,4959,4960,4961,4962,4963,4964,4965,4966,4967,4968,4969,4970,
4971,4972,4973,4974,4975,4976,4977,4978,4979,4980,1644,4981,4982,4983,4984,1645,
4985,4986,1646,4987,4988,4989,4990,4991,4992,4993,4994,4995,4996,4997,4998,4999,
5000,5001,5002,5003,5004,5005,1647,5006,1648,5007,5008,5009,5010,5011,5012,1078,
5013,5014,5015,5016,5017,5018,5019,5020,5021,5022,5023,5024,5025,5026,5027,5028,
1365,5029,5030,5031,5032,5033,5034,5035,5036,5037,5038,5039,1649,5040,5041,5042,
5043,5044,5045,1366,5046,5047,5048,5049,5050,5051,5052,5053,5054,5055,1650,5056,
5057,5058,5059,5060,5061,5062,5063,5064,5065,5066,5067,5068,5069,5070,5071,5072,
5073,5074,5075,5076,5077,1651,5078,5079,5080,5081,5082,5083,5084,5085,5086,5087,
5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102,5103,
5104,5105,5106,5107,5108,5109,5110,1652,5111,5112,5113,5114,5115,5116,5117,5118,
1367,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,1653,5130,5131,5132,
5133,5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,
5149,1368,5150,1654,5151,1369,5152,5153,5154,5155,5156,5157,5158,5159,5160,5161,
5162,5163,5164,5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,5176,5177,
5178,1370,5179,5180,5181,5182,5183,5184,5185,5186,5187,5188,5189,5190,5191,5192,
5193,5194,5195,5196,5197,5198,1655,5199,5200,5201,5202,1656,5203,5204,5205,5206,
1371,5207,1372,5208,5209,5210,5211,1373,5212,5213,1374,5214,5215,5216,5217,5218,
5219,5220,5221,5222,5223,5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,
5235,5236,5237,5238,5239,5240,5241,5242,5243,5244,5245,5246,5247,1657,5248,5249,
5250,5251,1658,1263,5252,5253,5254,5255,5256,1375,5257,5258,5259,5260,5261,5262,
5263,5264,5265,5266,5267,5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,
5279,5280,5281,5282,5283,1659,5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,
5294,5295,5296,5297,5298,5299,5300,1660,5301,5302,5303,5304,5305,5306,5307,5308,
5309,5310,5311,5312,5313,5314,5315,5316,5317,5318,5319,5320,5321,1376,5322,5323,
5324,5325,5326,5327,5328,5329,5330,5331,5332,5333,1198,5334,5335,5336,5337,5338,
5339,5340,5341,5342,5343,1661,5344,5345,5346,5347,5348,5349,5350,5351,5352,5353,
5354,5355,5356,5357,5358,5359,5360,5361,5362,5363,5364,5365,5366,5367,5368,5369,
5370,5371,5372,5373,5374,5375,5376,5377,5378,5379,5380,5381,5382,5383,5384,5385,
5386,5387,5388,5389,5390,5391,5392,5393,5394,5395,5396,5397,5398,1264,5399,5400,
5401,5402,5403,5404,5405,5406,5407,5408,5409,5410,5411,5412,1662,5413,5414,5415,
5416,1663,5417,5418,5419,5420,5421,5422,5423,5424,5425,5426,5427,5428,5429,5430,
5431,5432,5433,5434,5435,5436,5437,5438,1664,5439,5440,5441,5442,5443,5444,5445,
5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456,5457,5458,5459,5460,5461,
5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472,5473,5474,5475,5476,5477,
5478,1154,5479,5480,5481,5482,5483,5484,5485,1665,5486,5487,5488,5489,5490,5491,
5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504,5505,5506,5507,
5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520,5521,5522,5523,
5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536,5537,5538,5539,
5540,5541,5542,5543,5544,5545,5546,5547,5548,1377,5549,5550,5551,5552,5553,5554,
5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568,5569,5570,
1114,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584,5585,
5586,5587,5588,5589,5590,5591,5592,1378,5593,5594,5595,5596,5597,5598,5599,5600,
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,1379,5615,
5616,5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,
5632,5633,5634,1380,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,
5647,5648,5649,1381,1056,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,
1666,5661,5662,5663,5664,5665,5666,5667,5668,1667,5669,1668,5670,5671,5672,5673,
5674,5675,5676,5677,5678,1155,5679,5680,5681,5682,5683,5684,5685,5686,5687,5688,
5689,5690,5691,5692,5693,5694,5695,5696,5697,5698,1669,5699,5700,5701,5702,5703,
5704,5705,1670,5706,5707,5708,5709,5710,1671,5711,5712,5713,5714,1382,5715,5716,
5717,5718,5719,5720,5721,5722,5723,5724,5725,1672,5726,5727,1673,1674,5728,5729,
5730,5731,5732,5733,5734,5735,5736,1675,5737,5738,5739,5740,5741,5742,5743,5744,
1676,5745,5746,5747,5748,5749,5750,5751,1383,5752,5753,5754,5755,5756,5757,5758,
5759,5760,5761,5762,5763,5764,5765,5766,5767,5768,1677,5769,5770,5771,5772,5773,
1678,5774,5775,5776, 998,5777,5778,5779,5780,5781,5782,5783,5784,5785,1384,5786,
5787,5788,5789,5790,5791,5792,5793,5794,5795,5796,5797,5798,5799,5800,1679,5801,
5802,5803,1115,1116,5804,5805,5806,5807,5808,5809,5810,5811,5812,5813,5814,5815,
5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828,5829,5830,5831,
5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844,5845,5846,5847,
5848,5849,5850,5851,5852,5853,5854,5855,1680,5856,5857,5858,5859,5860,5861,5862,
5863,5864,1681,5865,5866,5867,1682,5868,5869,5870,5871,5872,5873,5874,5875,5876,
5877,5878,5879,1683,5880,1684,5881,5882,5883,5884,1685,5885,5886,5887,5888,5889,
5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,
5906,5907,1686,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,1687,
5936,5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,
5952,1688,1689,5953,1199,5954,5955,5956,5957,5958,5959,5960,5961,1690,5962,5963,
5964,5965,5966,5967,5968,5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,
5980,5981,1385,5982,1386,5983,5984,5985,5986,5987,5988,5989,5990,5991,5992,5993,
5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004,6005,6006,6007,6008,6009,
6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020,6021,6022,6023,6024,6025,
6026,6027,1265,6028,6029,1691,6030,6031,6032,6033,6034,6035,6036,6037,6038,6039,
6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052,6053,6054,6055,
6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068,6069,6070,6071,
6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084,1692,6085,6086,
6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100,6101,6102,
6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116,6117,6118,
6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,1693,6132,6133,
6134,6135,6136,1694,6137,6138,6139,6140,6141,1695,6142,6143,6144,6145,6146,6147,
6148,6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,
6164,6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,
6180,6181,6182,6183,6184,6185,1696,6186,6187,6188,6189,6190,6191,6192,6193,6194,
6195,6196,6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,
6211,6212,6213,6214,6215,6216,6217,6218,6219,1697,6220,6221,6222,6223,6224,6225,
6226,6227,6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,
6242,6243,6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,1698,6254,6255,6256,
6257,6258,6259,6260,6261,6262,6263,1200,6264,6265,6266,6267,6268,6269,6270,6271, #1024
6272,6273,6274,6275,6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,6286,6287,
6288,6289,6290,6291,6292,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,1699,
6303,6304,1700,6305,6306,6307,6308,6309,6310,6311,6312,6313,6314,6315,6316,6317,
6318,6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,
6334,6335,6336,6337,6338,6339,1701,6340,6341,6342,6343,6344,1387,6345,6346,6347,
6348,6349,6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,
6364,6365,6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,
6380,6381,6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,
6396,6397,6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,6411,
6412,6413,1702,6414,6415,6416,6417,6418,6419,6420,6421,6422,1703,6423,6424,6425,
6426,6427,6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,1704,6439,6440,
6441,6442,6443,6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,6455,6456,
6457,6458,6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,
6473,6474,6475,6476,6477,6478,6479,6480,6481,6482,6483,6484,6485,6486,6487,6488,
6489,6490,6491,6492,6493,6494,6495,6496,6497,6498,6499,6500,6501,6502,6503,1266,
6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,
6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532,6533,6534,6535,
6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548,6549,6550,6551,
1705,1706,6552,6553,6554,6555,6556,6557,6558,6559,6560,6561,6562,6563,6564,6565,
6566,6567,6568,6569,6570,6571,6572,6573,6574,6575,6576,6577,6578,6579,6580,6581,
6582,6583,6584,6585,6586,6587,6588,6589,6590,6591,6592,6593,6594,6595,6596,6597,
6598,6599,6600,6601,6602,6603,6604,6605,6606,6607,6608,6609,6610,6611,6612,6613,
6614,6615,6616,6617,6618,6619,6620,6621,6622,6623,6624,6625,6626,6627,6628,6629,
6630,6631,6632,6633,6634,6635,6636,6637,1388,6638,6639,6640,6641,6642,6643,6644,
1707,6645,6646,6647,6648,6649,6650,6651,6652,6653,6654,6655,6656,6657,6658,6659,
6660,6661,6662,6663,1708,6664,6665,6666,6667,6668,6669,6670,6671,6672,6673,6674,
1201,6675,6676,6677,6678,6679,6680,6681,6682,6683,6684,6685,6686,6687,6688,6689,
6690,6691,6692,6693,6694,6695,6696,6697,6698,6699,6700,6701,6702,6703,6704,6705,
6706,6707,6708,6709,6710,6711,6712,6713,6714,6715,6716,6717,6718,6719,6720,6721,
6722,6723,6724,6725,1389,6726,6727,6728,6729,6730,6731,6732,6733,6734,6735,6736,
1390,1709,6737,6738,6739,6740,6741,6742,1710,6743,6744,6745,6746,1391,6747,6748,
6749,6750,6751,6752,6753,6754,6755,6756,6757,1392,6758,6759,6760,6761,6762,6763,
6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777,6778,6779,
6780,1202,6781,6782,6783,6784,6785,6786,6787,6788,6789,6790,6791,6792,6793,6794,
6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806,6807,6808,6809,1711,
6810,6811,6812,6813,6814,6815,6816,6817,6818,6819,6820,6821,6822,6823,6824,6825,
6826,6827,6828,6829,6830,6831,6832,6833,6834,6835,6836,1393,6837,6838,6839,6840,
6841,6842,6843,6844,6845,6846,6847,6848,6849,6850,6851,6852,6853,6854,6855,6856,
6857,6858,6859,6860,6861,6862,6863,6864,6865,6866,6867,6868,6869,6870,6871,6872,
6873,6874,6875,6876,6877,6878,6879,6880,6881,6882,6883,6884,6885,6886,6887,6888,
6889,6890,6891,6892,6893,6894,6895,6896,6897,6898,6899,6900,6901,6902,1712,6903,
6904,6905,6906,6907,6908,6909,6910,1713,6911,6912,6913,6914,6915,6916,6917,6918,
6919,6920,6921,6922,6923,6924,6925,6926,6927,6928,6929,6930,6931,6932,6933,6934,
6935,6936,6937,6938,6939,6940,6941,6942,6943,6944,6945,6946,6947,6948,6949,6950,
6951,6952,6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,
6967,6968,6969,6970,6971,6972,6973,6974,1714,6975,6976,6977,6978,6979,6980,6981,
6982,6983,6984,6985,6986,6987,6988,1394,6989,6990,6991,6992,6993,6994,6995,6996,
6997,6998,6999,7000,1715,7001,7002,7003,7004,7005,7006,7007,7008,7009,7010,7011,
7012,7013,7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,
7028,1716,7029,7030,7031,7032,7033,7034,7035,7036,7037,7038,7039,7040,7041,7042,
7043,7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,
7059,7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,7071,7072,7073,7074,
7075,7076,7077,7078,7079,7080,7081,7082,7083,7084,7085,7086,7087,7088,7089,7090,
7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105,7106,
7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,7119,7120,7121,7122,
7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136,7137,7138,
7139,7140,7141,7142,7143,7144,7145,7146,7147,7148,7149,7150,7151,7152,7153,7154,
7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167,7168,7169,7170,
7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183,7184,7185,7186,
7187,7188,7189,7190,7191,7192,7193,7194,7195,7196,7197,7198,7199,7200,7201,7202,
7203,7204,7205,7206,7207,1395,7208,7209,7210,7211,7212,7213,1717,7214,7215,7216,
7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229,7230,7231,7232,
7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245,7246,7247,7248,
7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261,7262,7263,7264,
7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277,7278,7279,7280,
7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293,7294,7295,7296,
7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308,7309,7310,7311,7312,
7313,1718,7314,7315,7316,7317,7318,7319,7320,7321,7322,7323,7324,7325,7326,7327,
7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339,7340,7341,7342,7343,
7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,7354,7355,7356,7357,7358,7359,
7360,7361,7362,7363,7364,7365,7366,7367,7368,7369,7370,7371,7372,7373,7374,7375,
7376,7377,7378,7379,7380,7381,7382,7383,7384,7385,7386,7387,7388,7389,7390,7391,
7392,7393,7394,7395,7396,7397,7398,7399,7400,7401,7402,7403,7404,7405,7406,7407,
7408,7409,7410,7411,7412,7413,7414,7415,7416,7417,7418,7419,7420,7421,7422,7423,
7424,7425,7426,7427,7428,7429,7430,7431,7432,7433,7434,7435,7436,7437,7438,7439,
7440,7441,7442,7443,7444,7445,7446,7447,7448,7449,7450,7451,7452,7453,7454,7455,
7456,7457,7458,7459,7460,7461,7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,
7472,7473,7474,7475,7476,7477,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,
7488,7489,7490,7491,7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,7503,
7504,7505,7506,7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,
7520,7521,7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,
7536,7537,7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,7550,7551,
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567,
7568,7569,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582,7583,
7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598,7599,
7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614,7615,
7616,7617,7618,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628,7629,7630,7631,
7632,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643,7644,7645,7646,7647,
7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659,7660,7661,7662,7663,
7664,7665,7666,7667,7668,7669,7670,7671,7672,7673,7674,7675,7676,7677,7678,7679,
7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690,7691,7692,7693,7694,7695,
7696,7697,7698,7699,7700,7701,7702,7703,7704,7705,7706,7707,7708,7709,7710,7711,
7712,7713,7714,7715,7716,7717,7718,7719,7720,7721,7722,7723,7724,7725,7726,7727,
7728,7729,7730,7731,7732,7733,7734,7735,7736,7737,7738,7739,7740,7741,7742,7743,
7744,7745,7746,7747,7748,7749,7750,7751,7752,7753,7754,7755,7756,7757,7758,7759,
7760,7761,7762,7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,
7776,7777,7778,7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,
7792,7793,7794,7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,7806,7807,
7808,7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,
7824,7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,
7840,7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,
7856,7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,
7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,
7888,7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,
7904,7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,
7920,7921,7922,7923,7924,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935,
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951,
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967,
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983,
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999,
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031,
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047,
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111,
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127,
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,
8272,8273,8274,8275,8276,8277,8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,
8288,8289,8290,8291,8292,8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,
8304,8305,8306,8307,8308,8309,8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,
8320,8321,8322,8323,8324,8325,8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,
8336,8337,8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,
8352,8353,8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,
8368,8369,8370,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,
8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,
8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,
8416,8417,8418,8419,8420,8421,8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,
8432,8433,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,
8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,
8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,
8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,
8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,
8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,
8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,
8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,
8560,8561,8562,8563,8564,8565,8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,
8576,8577,8578,8579,8580,8581,8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,
8592,8593,8594,8595,8596,8597,8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,
8608,8609,8610,8611,8612,8613,8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,
8624,8625,8626,8627,8628,8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,
8640,8641,8642,8643,8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,
8656,8657,8658,8659,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,
8672,8673,8674,8675,8676,8677,8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,
8704,8705,8706,8707,8708,8709,8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,
8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,
8736,8737,8738,8739,8740,8741)
# flake8: noqa
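# --- Editor's sketch (illustrative, not part of chardet) ---
# How a character-distribution analyser typically consumes the table above:
# `order` is the index computed from an EUC-KR byte pair, and a character
# counts as "frequent" when its rank lands inside the top 512 described in
# the header comment.
def _is_frequent_euckr_char(order, _top=512):
    """Return True if `order` maps to one of the 512 most frequent EUC-KR
    characters (hypothetical helper, for illustration only)."""
    if 0 <= order < EUCKR_TABLE_SIZE:
        return EUCKRCharToFreqOrder[order] < _top
    return False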
|
polyval/CNC
|
refs/heads/master
|
flask/Lib/encodings/uu_codec.py
|
37
|
""" Python 'uu_codec' Codec - UU content transfer encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg (mal@lemburg.com). Some details were
adapted from uu.py which was written by Lance Ellinghouse and
modified by Jack Jansen and Fredrik Lundh.
"""
import codecs, binascii
### Codec APIs
def uu_encode(input,errors='strict',filename='<data>',mode=0666):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
from cStringIO import StringIO
from binascii import b2a_uu
# using str() because of cStringIO's undesired Unicode behavior.
infile = StringIO(str(input))
outfile = StringIO()
read = infile.read
write = outfile.write
# Encode
write('begin %o %s\n' % (mode & 0777, filename))
chunk = read(45)
while chunk:
write(b2a_uu(chunk))
chunk = read(45)
write(' \nend\n')
return (outfile.getvalue(), len(input))
def uu_decode(input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
Note: filename and file mode information in the input data is
ignored.
"""
assert errors == 'strict'
from cStringIO import StringIO
from binascii import a2b_uu
infile = StringIO(str(input))
outfile = StringIO()
readline = infile.readline
write = outfile.write
# Find start of encoded data
while 1:
s = readline()
if not s:
raise ValueError, 'Missing "begin" line in input data'
if s[:5] == 'begin':
break
# Decode
while 1:
s = readline()
if not s or \
s == 'end\n':
break
try:
data = a2b_uu(s)
except binascii.Error, v:
# Workaround for broken uuencoders by /Fredrik Lundh
nbytes = (((ord(s[0])-32) & 63) * 4 + 5) // 3
data = a2b_uu(s[:nbytes])
#sys.stderr.write("Warning: %s\n" % str(v))
write(data)
if not s:
raise ValueError, 'Truncated input data'
return (outfile.getvalue(), len(input))
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return uu_encode(input,errors)
def decode(self,input,errors='strict'):
return uu_decode(input,errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return uu_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return uu_decode(input, self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='uu',
encode=uu_encode,
decode=uu_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
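# --- Editor's sketch (illustrative, Python 2 only, matching the code above) ---
# A round trip through the module-level helpers; mode defaults to 0666, so the
# encoded payload starts with 'begin 666 <data>'.
if __name__ == '__main__':
    _payload = 'hello world'
    _encoded, _consumed = uu_encode(_payload)
    assert _encoded.startswith('begin 666 <data>')
    _decoded, _ = uu_decode(_encoded)
    assert _decoded == _payload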
|
cognitiveclass/edx-platform
|
refs/heads/master
|
lms/djangoapps/teams/tests/test_views.py
|
18
|
# -*- coding: utf-8 -*-
"""Tests for the teams API at the HTTP request level."""
import json
from datetime import datetime
import pytz
from dateutil import parser
import ddt
from elasticsearch.exceptions import ConnectionError
from mock import patch
from search.search_engine_base import SearchEngine
from django.core.urlresolvers import reverse
from django.conf import settings
from django.db.models.signals import post_save
from django.utils import translation
from nose.plugins.attrib import attr
from rest_framework.test import APITestCase, APIClient
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from courseware.tests.factories import StaffFactory
from common.test.utils import skip_signal
from student.tests.factories import UserFactory, AdminFactory, CourseEnrollmentFactory
from student.models import CourseEnrollment
from util.testing import EventTestMixin
from .factories import CourseTeamFactory, LAST_ACTIVITY_AT
from ..models import CourseTeamMembership
from ..search_indexes import CourseTeamIndexer, CourseTeam, course_team_post_save_callback
from django_comment_common.models import Role, FORUM_ROLE_COMMUNITY_TA
from django_comment_common.utils import seed_permissions_roles
@attr('shard_1')
class TestDashboard(SharedModuleStoreTestCase):
"""Tests for the Teams dashboard."""
test_password = "test"
NUM_TOPICS = 10
@classmethod
def setUpClass(cls):
super(TestDashboard, cls).setUpClass()
cls.course = CourseFactory.create(
teams_configuration={
"max_team_size": 10,
"topics": [
{
"name": "Topic {}".format(topic_id),
"id": topic_id,
"description": "Description for topic {}".format(topic_id)
}
for topic_id in range(cls.NUM_TOPICS)
]
}
)
def setUp(self):
"""
Set up tests
"""
super(TestDashboard, self).setUp()
# will be assigned to self.client by default
self.user = UserFactory.create(password=self.test_password)
self.teams_url = reverse('teams_dashboard', args=[self.course.id])
def test_anonymous(self):
"""Verifies that an anonymous client cannot access the team
dashboard, and is redirected to the login page."""
anonymous_client = APIClient()
response = anonymous_client.get(self.teams_url)
redirect_url = '{0}?next={1}'.format(settings.LOGIN_URL, self.teams_url)
self.assertRedirects(response, redirect_url)
def test_not_enrolled_not_staff(self):
""" Verifies that a student who is not enrolled cannot access the team dashboard. """
self.client.login(username=self.user.username, password=self.test_password)
response = self.client.get(self.teams_url)
self.assertEqual(404, response.status_code)
def test_not_enrolled_staff(self):
"""
Verifies that a user with global access who is not enrolled in the course can access the team dashboard.
"""
staff_user = UserFactory(is_staff=True, password=self.test_password)
staff_client = APIClient()
staff_client.login(username=staff_user.username, password=self.test_password)
response = staff_client.get(self.teams_url)
self.assertContains(response, "TeamsTabFactory", status_code=200)
def test_enrolled_not_staff(self):
"""
Verifies that a user without global access who is enrolled in the course can access the team dashboard.
"""
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.client.login(username=self.user.username, password=self.test_password)
response = self.client.get(self.teams_url)
self.assertContains(response, "TeamsTabFactory", status_code=200)
def test_enrolled_teams_not_enabled(self):
"""
Verifies that a user without global access who is enrolled in the course cannot access the team dashboard
if the teams feature is not enabled.
"""
course = CourseFactory.create()
teams_url = reverse('teams_dashboard', args=[course.id])
CourseEnrollmentFactory.create(user=self.user, course_id=course.id)
self.client.login(username=self.user.username, password=self.test_password)
response = self.client.get(teams_url)
self.assertEqual(404, response.status_code)
def test_query_counts(self):
# Enroll in the course and log in
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.client.login(username=self.user.username, password=self.test_password)
# Check the query count on the dashboard with no teams
with self.assertNumQueries(17):
self.client.get(self.teams_url)
# Create some teams
for topic_id in range(self.NUM_TOPICS):
team = CourseTeamFactory.create(
name=u"Team for topic {}".format(topic_id),
course_id=self.course.id,
topic_id=topic_id,
)
# Add the user to the last team
team.add_user(self.user)
# Check the query count on the dashboard again
with self.assertNumQueries(23):
self.client.get(self.teams_url)
def test_bad_course_id(self):
"""
Verifies expected behavior when course_id does not reference an existing course or is invalid.
"""
bad_org = "badorgxxx"
bad_team_url = self.teams_url.replace(self.course.id.org, bad_org)
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.client.login(username=self.user.username, password=self.test_password)
response = self.client.get(bad_team_url)
self.assertEqual(404, response.status_code)
bad_team_url = bad_team_url.replace(bad_org, "invalid/course/id")
response = self.client.get(bad_team_url)
self.assertEqual(404, response.status_code)
def get_user_course_specific_teams_list(self):
"""Gets the list of user course specific teams."""
# Create a second course
course_two = CourseFactory.create(
teams_configuration={
"max_team_size": 1,
"topics": [
{
"name": "Test topic for course two",
"id": 1,
"description": "Description for test topic for course two."
}
]
}
)
# Log in and enroll the user in both courses
self.client.login(username=self.user.username, password=self.test_password)
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
CourseEnrollmentFactory.create(user=self.user, course_id=course_two.id)
# Create teams in both courses
course_one_team = CourseTeamFactory.create(name="Course one team", course_id=self.course.id, topic_id=1)
course_two_team = CourseTeamFactory.create(name="Course two team", course_id=course_two.id, topic_id=1) # pylint: disable=unused-variable
# Check that the list of user teams in course one is initially empty
course_one_teams_url = reverse('teams_dashboard', args=[self.course.id])
response = self.client.get(course_one_teams_url)
self.assertIn('"teams": {"count": 0', response.content)
# Add the user to a course one team
course_one_team.add_user(self.user)
# Check that the list of user teams in course one now contains one team
response = self.client.get(course_one_teams_url)
self.assertIn('"teams": {"count": 1', response.content)
# Check that the list of user teams in course two is still empty
course_two_teams_url = reverse('teams_dashboard', args=[course_two.id])
response = self.client.get(course_two_teams_url)
self.assertIn('"teams": {"count": 0', response.content)
class TeamAPITestCase(APITestCase, SharedModuleStoreTestCase):
"""Base class for Team API test cases."""
test_password = 'password'
@classmethod
def setUpClass(cls):
super(TeamAPITestCase, cls).setUpClass()
teams_configuration_1 = {
'topics':
[
{
'id': 'topic_{}'.format(i),
'name': name,
'description': 'Description for topic {}.'.format(i)
} for i, name in enumerate([u'Sólar power', 'Wind Power', 'Nuclear Power', 'Coal Power'])
]
}
cls.test_course_1 = CourseFactory.create(
org='TestX',
course='TS101',
display_name='Test Course',
teams_configuration=teams_configuration_1
)
teams_configuration_2 = {
'topics':
[
{
'id': 'topic_5',
'name': 'Other Interests',
'description': 'Description for topic 5.'
},
{
'id': 'topic_6',
'name': 'Public Profiles',
'description': 'Description for topic 6.'
},
],
'max_team_size': 1
}
cls.test_course_2 = CourseFactory.create(
org='MIT',
course='6.002x',
display_name='Circuits',
teams_configuration=teams_configuration_2
)
def setUp(self):
super(TeamAPITestCase, self).setUp()
self.topics_count = 4
self.users = {
'staff': AdminFactory.create(password=self.test_password),
'course_staff': StaffFactory.create(course_key=self.test_course_1.id, password=self.test_password)
}
self.create_and_enroll_student(username='student_enrolled')
self.create_and_enroll_student(username='student_enrolled_not_on_team')
self.create_and_enroll_student(username='student_unenrolled', courses=[])
# Make this student a community TA.
self.create_and_enroll_student(username='community_ta')
seed_permissions_roles(self.test_course_1.id)
community_ta_role = Role.objects.get(name=FORUM_ROLE_COMMUNITY_TA, course_id=self.test_course_1.id)
community_ta_role.users.add(self.users['community_ta'])
# This student is enrolled in both test courses and is a member of a team in each course, but is not on the
# same team as student_enrolled.
self.create_and_enroll_student(
courses=[self.test_course_1, self.test_course_2],
username='student_enrolled_both_courses_other_team'
)
# Make this student have a public profile
self.create_and_enroll_student(
courses=[self.test_course_2],
username='student_enrolled_public_profile'
)
profile = self.users['student_enrolled_public_profile'].profile
profile.year_of_birth = 1970
profile.save()
# This student is enrolled in the other course, but not yet a member of a team. This is to allow
# course_2 to use a max_team_size of 1 without breaking other tests on course_1
self.create_and_enroll_student(
courses=[self.test_course_2],
username='student_enrolled_other_course_not_on_team'
)
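        # Skip the post_save receiver while creating these fixture teams so the
        # search-index receiver does not fire as a side effect of setUp; tests
        # that need search results rebuild the index explicitly.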
with skip_signal(
post_save,
receiver=course_team_post_save_callback,
sender=CourseTeam,
dispatch_uid='teams.signals.course_team_post_save_callback'
):
self.solar_team = CourseTeamFactory.create(
name=u'Sólar team',
course_id=self.test_course_1.id,
topic_id='topic_0'
)
self.wind_team = CourseTeamFactory.create(name='Wind Team', course_id=self.test_course_1.id)
self.nuclear_team = CourseTeamFactory.create(name='Nuclear Team', course_id=self.test_course_1.id)
self.another_team = CourseTeamFactory.create(name='Another Team', course_id=self.test_course_2.id)
self.public_profile_team = CourseTeamFactory.create(
name='Public Profile Team',
course_id=self.test_course_2.id,
topic_id='topic_6'
)
self.search_team = CourseTeamFactory.create(
name='Search',
description='queryable text',
country='GS',
language='to',
course_id=self.test_course_2.id,
topic_id='topic_7'
)
self.chinese_team = CourseTeamFactory.create(
name=u'著文企臺個',
description=u'共樣地面較,件展冷不護者這與民教過住意,國制銀產物助音是勢一友',
country='CN',
language='zh_HANS',
course_id=self.test_course_2.id,
topic_id='topic_7'
)
self.test_team_name_id_map = {team.name: team for team in (
self.solar_team,
self.wind_team,
self.nuclear_team,
self.another_team,
self.public_profile_team,
self.search_team,
self.chinese_team,
)}
for user, course in [('staff', self.test_course_1), ('course_staff', self.test_course_1)]:
CourseEnrollment.enroll(
self.users[user], course.id, check_access=True
)
        # Add users to the teams created above.
self.solar_team.add_user(self.users['student_enrolled'])
self.nuclear_team.add_user(self.users['student_enrolled_both_courses_other_team'])
self.another_team.add_user(self.users['student_enrolled_both_courses_other_team'])
self.public_profile_team.add_user(self.users['student_enrolled_public_profile'])
def build_membership_data_raw(self, username, team):
"""Assembles a membership creation payload based on the raw values provided."""
return {'username': username, 'team_id': team}
def build_membership_data(self, username, team):
"""Assembles a membership creation payload based on the username and team model provided."""
return self.build_membership_data_raw(self.users[username].username, team.team_id)
def create_and_enroll_student(self, courses=None, username=None):
""" Creates a new student and enrolls that student in the course.
Adds the new user to the self.users dictionary with the username as the key.
Returns the username once the user has been created.
"""
if username is not None:
user = UserFactory.create(password=self.test_password, username=username)
else:
user = UserFactory.create(password=self.test_password)
courses = courses if courses is not None else [self.test_course_1]
for course in courses:
CourseEnrollment.enroll(user, course.id, check_access=True)
self.users[user.username] = user
return user.username
def login(self, user):
"""Given a user string, logs the given user in.
Used for testing with ddt, which does not have access to self in
decorators. If user is 'student_inactive', then an inactive user will
be both created and logged in.
"""
if user == 'student_inactive':
student_inactive = UserFactory.create(password=self.test_password)
self.client.login(username=student_inactive.username, password=self.test_password)
student_inactive.is_active = False
student_inactive.save()
else:
self.client.login(username=self.users[user].username, password=self.test_password)
def make_call(self, url, expected_status=200, method='get', data=None, content_type=None, **kwargs):
"""Makes a call to the Team API at the given url with method and data.
If a user is specified in kwargs, that user is first logged in.
"""
user = kwargs.pop('user', 'student_enrolled_not_on_team')
if user:
self.login(user)
func = getattr(self.client, method)
if content_type:
response = func(url, data=data, content_type=content_type)
else:
response = func(url, data=data)
self.assertEqual(
expected_status,
response.status_code,
msg="Expected status {expected} but got {actual}: {content}".format(
expected=expected_status,
actual=response.status_code,
content=response.content,
)
)
if expected_status == 200:
return json.loads(response.content)
else:
return response
def get_teams_list(self, expected_status=200, data=None, no_course_id=False, **kwargs):
"""Gets the list of teams as the given user with data as query params. Verifies expected_status."""
data = data if data else {}
if 'course_id' not in data and not no_course_id:
data.update({'course_id': self.test_course_1.id})
return self.make_call(reverse('teams_list'), expected_status, 'get', data, **kwargs)
def get_user_course_specific_teams_list(self):
"""Gets the list of user course specific teams."""
# Create and enroll user in both courses
user = self.create_and_enroll_student(
courses=[self.test_course_1, self.test_course_2],
username='test_user_enrolled_both_courses'
)
course_one_data = {'course_id': self.test_course_1.id, 'username': user}
course_two_data = {'course_id': self.test_course_2.id, 'username': user}
# Check that initially list of user teams in course one is empty
team_list = self.get_teams_list(user=user, expected_status=200, data=course_one_data)
self.assertEqual(team_list['count'], 0)
# Add user to a course one team
self.solar_team.add_user(self.users[user])
# Check that list of user teams in course one is not empty now
team_list = self.get_teams_list(user=user, expected_status=200, data=course_one_data)
self.assertEqual(team_list['count'], 1)
# Check that list of user teams in course two is still empty
team_list = self.get_teams_list(user=user, expected_status=200, data=course_two_data)
self.assertEqual(team_list['count'], 0)
def build_team_data(self, name="Test team", course=None, description="Filler description", **kwargs):
"""Creates the payload for creating a team. kwargs can be used to specify additional fields."""
data = kwargs
course = course if course else self.test_course_1
data.update({
'name': name,
'course_id': str(course.id),
'description': description,
})
return data
def post_create_team(self, expected_status=200, data=None, **kwargs):
"""Posts data to the team creation endpoint. Verifies expected_status."""
return self.make_call(reverse('teams_list'), expected_status, 'post', data, **kwargs)
def get_team_detail(self, team_id, expected_status=200, data=None, **kwargs):
"""Gets detailed team information for team_id. Verifies expected_status."""
return self.make_call(reverse('teams_detail', args=[team_id]), expected_status, 'get', data, **kwargs)
def delete_team(self, team_id, expected_status, **kwargs):
"""Delete the given team. Verifies expected_status."""
return self.make_call(reverse('teams_detail', args=[team_id]), expected_status, 'delete', **kwargs)
def patch_team_detail(self, team_id, expected_status, data=None, **kwargs):
"""Patches the team with team_id using data. Verifies expected_status."""
return self.make_call(
reverse('teams_detail', args=[team_id]),
expected_status,
'patch',
json.dumps(data) if data else None,
'application/merge-patch+json',
**kwargs
)
def get_topics_list(self, expected_status=200, data=None, **kwargs):
"""Gets the list of topics, passing data as query params. Verifies expected_status."""
return self.make_call(reverse('topics_list'), expected_status, 'get', data, **kwargs)
def get_topic_detail(self, topic_id, course_id, expected_status=200, data=None, **kwargs):
"""Gets a single topic, passing data as query params. Verifies expected_status."""
return self.make_call(
reverse('topics_detail', kwargs={'topic_id': topic_id, 'course_id': str(course_id)}),
expected_status,
'get',
data,
**kwargs
)
def get_membership_list(self, expected_status=200, data=None, **kwargs):
"""Gets the membership list, passing data as query params. Verifies expected_status."""
return self.make_call(reverse('team_membership_list'), expected_status, 'get', data, **kwargs)
def post_create_membership(self, expected_status=200, data=None, **kwargs):
"""Posts data to the membership creation endpoint. Verifies expected_status."""
return self.make_call(reverse('team_membership_list'), expected_status, 'post', data, **kwargs)
def get_membership_detail(self, team_id, username, expected_status=200, data=None, **kwargs):
"""Gets an individual membership record, passing data as query params. Verifies expected_status."""
return self.make_call(
reverse('team_membership_detail', args=[team_id, username]),
expected_status,
'get',
data,
**kwargs
)
def delete_membership(self, team_id, username, expected_status=200, **kwargs):
"""Deletes an individual membership record. Verifies expected_status."""
url = reverse('team_membership_detail', args=[team_id, username]) + '?admin=true'
return self.make_call(url, expected_status, 'delete', **kwargs)
def verify_expanded_public_user(self, user):
"""Verifies that fields exist on the returned user json indicating that it is expanded."""
for field in ['username', 'url', 'bio', 'country', 'profile_image', 'time_zone', 'language_proficiencies']:
self.assertIn(field, user)
def verify_expanded_private_user(self, user):
"""Verifies that fields exist on the returned user json indicating that it is expanded."""
for field in ['username', 'url', 'profile_image']:
self.assertIn(field, user)
for field in ['bio', 'country', 'time_zone', 'language_proficiencies']:
self.assertNotIn(field, user)
def verify_expanded_team(self, team):
"""Verifies that fields exist on the returned team json indicating that it is expanded."""
for field in ['id', 'name', 'course_id', 'topic_id', 'date_created', 'description']:
self.assertIn(field, team)
@ddt.ddt
class TestListTeamsAPI(EventTestMixin, TeamAPITestCase):
"""Test cases for the team listing API endpoint."""
def setUp(self): # pylint: disable=arguments-differ
super(TestListTeamsAPI, self).setUp('lms.djangoapps.teams.utils.tracker')
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 403),
('student_enrolled', 200),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
teams = self.get_teams_list(user=user, expected_status=status)
if status == 200:
self.assertEqual(3, teams['count'])
def test_missing_course_id(self):
self.get_teams_list(400, no_course_id=True)
def verify_names(self, data, status, names=None, **kwargs):
"""Gets a team listing with data as query params, verifies status, and then verifies team names if specified."""
teams = self.get_teams_list(data=data, expected_status=status, **kwargs)
if names is not None and 200 <= status < 300:
results = teams['results']
self.assertEqual(names, [team['name'] for team in results])
def test_filter_invalid_course_id(self):
self.verify_names({'course_id': 'no_such_course'}, 400)
def test_filter_course_id(self):
self.verify_names(
{'course_id': self.test_course_2.id},
200,
['Another Team', 'Public Profile Team', 'Search', u'著文企臺個'],
user='staff'
)
def test_filter_topic_id(self):
self.verify_names({'course_id': self.test_course_1.id, 'topic_id': 'topic_0'}, 200, [u'Sólar team'])
def test_filter_username(self):
self.verify_names({'course_id': self.test_course_1.id, 'username': 'student_enrolled'}, 200, [u'Sólar team'])
self.verify_names({'course_id': self.test_course_1.id, 'username': 'staff'}, 200, [])
@ddt.data(
(None, 200, ['Nuclear Team', u'Sólar team', 'Wind Team']),
('name', 200, ['Nuclear Team', u'Sólar team', 'Wind Team']),
# Note that "Nuclear Team" and "Solar team" have the same open_slots.
# "Solar team" comes first due to secondary sort by last_activity_at.
('open_slots', 200, ['Wind Team', u'Sólar team', 'Nuclear Team']),
# Note that "Wind Team" and "Nuclear Team" have the same last_activity_at.
# "Wind Team" comes first due to secondary sort by open_slots.
('last_activity_at', 200, [u'Sólar team', 'Wind Team', 'Nuclear Team']),
)
@ddt.unpack
def test_order_by(self, field, status, names):
# Make "Solar team" the most recently active team.
# The CourseTeamFactory sets the last_activity_at to a fixed time (in the past), so all of the
# other teams have the same last_activity_at.
with skip_signal(
post_save,
receiver=course_team_post_save_callback,
sender=CourseTeam,
dispatch_uid='teams.signals.course_team_post_save_callback'
):
solar_team = self.test_team_name_id_map[u'Sólar team']
solar_team.last_activity_at = datetime.utcnow().replace(tzinfo=pytz.utc)
solar_team.save()
data = {'order_by': field} if field else {}
self.verify_names(data, status, names)
def test_order_by_with_text_search(self):
data = {'order_by': 'name', 'text_search': 'search'}
self.verify_names(data, 400, [])
self.assert_no_events_were_emitted()
@ddt.data((404, {'course_id': 'no/such/course'}), (400, {'topic_id': 'no_such_topic'}))
@ddt.unpack
def test_no_results(self, status, data):
self.get_teams_list(status, data)
def test_page_size(self):
result = self.get_teams_list(200, {'page_size': 2})
        self.assertEqual(2, result['num_pages'])
def test_page(self):
result = self.get_teams_list(200, {'page_size': 1, 'page': 3})
        self.assertEqual(3, result['num_pages'])
self.assertIsNone(result['next'])
self.assertIsNotNone(result['previous'])
def test_expand_private_user(self):
        # Use the default user, which is already private because no year_of_birth is set
result = self.get_teams_list(200, {'expand': 'user', 'topic_id': 'topic_0'})
self.verify_expanded_private_user(result['results'][0]['membership'][0]['user'])
def test_expand_public_user(self):
result = self.get_teams_list(
200,
{
'expand': 'user',
'topic_id': 'topic_6',
'course_id': self.test_course_2.id
},
user='student_enrolled_public_profile'
)
self.verify_expanded_public_user(result['results'][0]['membership'][0]['user'])
@ddt.data(
('search', ['Search']),
('queryable', ['Search']),
('Tonga', ['Search']),
('Island', ['Search']),
('not-a-query', []),
('team', ['Another Team', 'Public Profile Team']),
(u'著文企臺個', [u'著文企臺個']),
)
@ddt.unpack
def test_text_search(self, text_search, expected_team_names):
def reset_search_index():
"""Clear out the search index and reindex the teams."""
CourseTeamIndexer.engine().destroy()
for team in self.test_team_name_id_map.values():
CourseTeamIndexer.index(team)
reset_search_index()
self.verify_names(
{'course_id': self.test_course_2.id, 'text_search': text_search},
200,
expected_team_names,
user='student_enrolled_public_profile'
)
self.assert_event_emitted(
'edx.team.searched',
search_text=text_search,
topic_id=None,
number_of_results=len(expected_team_names)
)
# Verify that the searches still work for a user from a different locale
with translation.override('ar'):
reset_search_index()
self.verify_names(
{'course_id': self.test_course_2.id, 'text_search': text_search},
200,
expected_team_names,
user='student_enrolled_public_profile'
)
def test_delete_removed_from_search(self):
team = CourseTeamFactory.create(
name=u'zoinks',
course_id=self.test_course_1.id,
topic_id='topic_0'
)
self.verify_names(
{'course_id': self.test_course_1.id, 'text_search': 'zoinks'},
200,
[team.name],
user='staff'
)
team.delete()
self.verify_names(
{'course_id': self.test_course_1.id, 'text_search': 'zoinks'},
200,
[],
user='staff'
)
@ddt.ddt
class TestCreateTeamAPI(EventTestMixin, TeamAPITestCase):
"""Test cases for the team creation endpoint."""
def setUp(self): # pylint: disable=arguments-differ
super(TestCreateTeamAPI, self).setUp('lms.djangoapps.teams.utils.tracker')
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 403),
('student_enrolled_not_on_team', 200),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
team = self.post_create_team(status, self.build_team_data(name="New Team"), user=user)
if status == 200:
self.verify_expected_team_id(team, 'new-team')
teams = self.get_teams_list(user=user)
self.assertIn("New Team", [team['name'] for team in teams['results']])
def _expected_team_id(self, team, expected_prefix):
""" Return the team id that we'd expect given this team data and this prefix. """
return expected_prefix + '-' + team['discussion_topic_id']
def verify_expected_team_id(self, team, expected_prefix):
""" Verifies that the team id starts with the specified prefix and ends with the discussion_topic_id """
self.assertIn('id', team)
self.assertIn('discussion_topic_id', team)
self.assertEqual(team['id'], self._expected_team_id(team, expected_prefix))
def test_naming(self):
new_teams = [
self.post_create_team(data=self.build_team_data(name=name), user=self.create_and_enroll_student())
for name in ["The Best Team", "The Best Team", "A really long team name"]
]
# Check that teams with the same name have unique IDs.
self.verify_expected_team_id(new_teams[0], 'the-best-team')
self.verify_expected_team_id(new_teams[1], 'the-best-team')
self.assertNotEqual(new_teams[0]['id'], new_teams[1]['id'])
# Verify expected truncation behavior with names > 20 characters.
self.verify_expected_team_id(new_teams[2], 'a-really-long-team-n')
@ddt.data((400, {
'name': 'Bad Course ID',
'course_id': 'no_such_course',
'description': "Filler Description"
}), (404, {
'name': "Non-existent course ID",
'course_id': 'no/such/course',
'description': "Filler Description"
}))
@ddt.unpack
def test_bad_course_data(self, status, data):
self.post_create_team(status, data)
def test_student_in_team(self):
response = self.post_create_team(
400,
data=self.build_team_data(
name="Doomed team",
course=self.test_course_1,
description="Overly ambitious student"
),
user='student_enrolled'
)
self.assertEqual(
"You are already in a team in this course.",
json.loads(response.content)["user_message"]
)
@ddt.data('staff', 'course_staff', 'community_ta')
def test_privileged_create_multiple_teams(self, user):
""" Privileged users can create multiple teams, even if they are already in one. """
# First add the privileged user to a team.
self.post_create_membership(
200,
self.build_membership_data(user, self.solar_team),
user=user
)
self.post_create_team(
data=self.build_team_data(
name="Another team",
course=self.test_course_1,
description="Privileged users are the best"
),
user=user
)
@ddt.data({'description': ''}, {'name': 'x' * 1000}, {'name': ''})
def test_bad_fields(self, kwargs):
self.post_create_team(400, self.build_team_data(**kwargs))
def test_missing_name(self):
self.post_create_team(400, {
'course_id': str(self.test_course_1.id),
'description': "foobar"
})
def test_full_student_creator(self):
creator = self.create_and_enroll_student()
team = self.post_create_team(data=self.build_team_data(
name="Fully specified team",
course=self.test_course_1,
description="Another fantastic team",
topic_id='great-topic',
country='CA',
language='fr'
), user=creator)
# Verify the id (it ends with a unique hash, which is the same as the discussion_id).
self.verify_expected_team_id(team, 'fully-specified-team')
del team['id']
self.assert_event_emitted(
'edx.team.created',
team_id=self._expected_team_id(team, 'fully-specified-team'),
)
self.assert_event_emitted(
'edx.team.learner_added',
team_id=self._expected_team_id(team, 'fully-specified-team'),
user_id=self.users[creator].id,
add_method='added_on_create'
)
# Remove date_created and discussion_topic_id because they change between test runs
del team['date_created']
del team['discussion_topic_id']
# Since membership is its own list, we want to examine this separately.
team_membership = team['membership']
del team['membership']
        # Verify that last_activity_at has been set to a time today.
self.assertEqual(
parser.parse(team['last_activity_at']).date(),
datetime.utcnow().replace(tzinfo=pytz.utc).date()
)
del team['last_activity_at']
# Verify that the creating user gets added to the team.
self.assertEqual(len(team_membership), 1)
member = team_membership[0]['user']
self.assertEqual(member['username'], creator)
self.assertEqual(team, {
'name': 'Fully specified team',
'language': 'fr',
'country': 'CA',
'topic_id': 'great-topic',
'course_id': str(self.test_course_1.id),
'description': 'Another fantastic team'
})
@ddt.data('staff', 'course_staff', 'community_ta')
def test_membership_staff_creator(self, user):
# Verify that staff do not automatically get added to a team
# when they create one.
team = self.post_create_team(data=self.build_team_data(
name="New team",
course=self.test_course_1,
description="Another fantastic team",
), user=user)
self.assertEqual(team['membership'], [])
@ddt.ddt
class TestDetailTeamAPI(TeamAPITestCase):
"""Test cases for the team detail endpoint."""
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 403),
('student_enrolled', 200),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
team = self.get_team_detail(self.solar_team.team_id, status, user=user)
if status == 200:
self.assertEqual(team['description'], self.solar_team.description)
self.assertEqual(team['discussion_topic_id'], self.solar_team.discussion_topic_id)
self.assertEqual(parser.parse(team['last_activity_at']), LAST_ACTIVITY_AT)
def test_does_not_exist(self):
self.get_team_detail('no_such_team', 404)
def test_expand_private_user(self):
        # Use the default user, which is already private because no year_of_birth is set
result = self.get_team_detail(self.solar_team.team_id, 200, {'expand': 'user'})
self.verify_expanded_private_user(result['membership'][0]['user'])
def test_expand_public_user(self):
result = self.get_team_detail(
self.public_profile_team.team_id,
200,
{'expand': 'user'},
user='student_enrolled_public_profile'
)
self.verify_expanded_public_user(result['membership'][0]['user'])
@ddt.ddt
class TestDeleteTeamAPI(EventTestMixin, TeamAPITestCase):
"""Test cases for the team delete endpoint."""
def setUp(self): # pylint: disable=arguments-differ
super(TestDeleteTeamAPI, self).setUp('lms.djangoapps.teams.utils.tracker')
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 403),
('student_enrolled', 403),
('staff', 204),
('course_staff', 204),
('community_ta', 204)
)
@ddt.unpack
def test_access(self, user, status):
self.delete_team(self.solar_team.team_id, status, user=user)
if status == 204:
self.assert_event_emitted(
'edx.team.deleted',
team_id=self.solar_team.team_id,
)
self.assert_event_emitted(
'edx.team.learner_removed',
team_id=self.solar_team.team_id,
remove_method='team_deleted',
user_id=self.users['student_enrolled'].id
)
def test_does_not_exist(self):
self.delete_team('nonexistent', 404)
def test_memberships_deleted(self):
self.assertEqual(CourseTeamMembership.objects.filter(team=self.solar_team).count(), 1)
self.delete_team(self.solar_team.team_id, 204, user='staff')
self.assert_event_emitted(
'edx.team.deleted',
team_id=self.solar_team.team_id,
)
self.assert_event_emitted(
'edx.team.learner_removed',
team_id=self.solar_team.team_id,
remove_method='team_deleted',
user_id=self.users['student_enrolled'].id
)
self.assertEqual(CourseTeamMembership.objects.filter(team=self.solar_team).count(), 0)
@ddt.ddt
class TestUpdateTeamAPI(EventTestMixin, TeamAPITestCase):
"""Test cases for the team update endpoint."""
def setUp(self): # pylint: disable=arguments-differ
super(TestUpdateTeamAPI, self).setUp('lms.djangoapps.teams.utils.tracker')
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 403),
('student_enrolled', 403),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
prev_name = self.solar_team.name
team = self.patch_team_detail(self.solar_team.team_id, status, {'name': 'foo'}, user=user)
if status == 200:
            self.assertEqual(team['name'], 'foo')
self.assert_event_emitted(
'edx.team.changed',
team_id=self.solar_team.team_id,
truncated=[],
field='name',
old=prev_name,
new='foo'
)
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 404),
('student_enrolled', 404),
('staff', 404),
('course_staff', 404),
('community_ta', 404),
)
@ddt.unpack
def test_access_bad_id(self, user, status):
self.patch_team_detail("no_such_team", status, {'name': 'foo'}, user=user)
@ddt.data(
('id', 'foobar'),
('description', ''),
('country', 'no_such_country'),
('language', 'no_such_language')
)
@ddt.unpack
def test_bad_requests(self, key, value):
self.patch_team_detail(self.solar_team.team_id, 400, {key: value}, user='staff')
@ddt.data(('country', 'US'), ('language', 'en'), ('foo', 'bar'))
@ddt.unpack
def test_good_requests(self, key, value):
if hasattr(self.solar_team, key):
prev_value = getattr(self.solar_team, key)
self.patch_team_detail(self.solar_team.team_id, 200, {key: value}, user='staff')
if hasattr(self.solar_team, key):
self.assert_event_emitted(
'edx.team.changed',
team_id=self.solar_team.team_id,
truncated=[],
field=key,
old=prev_value,
new=value
)
def test_does_not_exist(self):
self.patch_team_detail('no_such_team', 404, user='staff')
@ddt.ddt
class TestListTopicsAPI(TeamAPITestCase):
"""Test cases for the topic listing endpoint."""
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 403),
('student_enrolled', 200),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
topics = self.get_topics_list(status, {'course_id': self.test_course_1.id}, user=user)
if status == 200:
self.assertEqual(topics['count'], self.topics_count)
@ddt.data('A+BOGUS+COURSE', 'A/BOGUS/COURSE')
def test_invalid_course_key(self, course_id):
self.get_topics_list(404, {'course_id': course_id})
def test_without_course_id(self):
self.get_topics_list(400)
@ddt.data(
(None, 200, ['Coal Power', 'Nuclear Power', u'Sólar power', 'Wind Power'], 'name'),
('name', 200, ['Coal Power', 'Nuclear Power', u'Sólar power', 'Wind Power'], 'name'),
# Note that "Nuclear Power" and "Solar power" both have 2 teams. "Coal Power" and "Window Power"
# both have 0 teams. The secondary sort is alphabetical by name.
('team_count', 200, ['Nuclear Power', u'Sólar power', 'Coal Power', 'Wind Power'], 'team_count'),
('no_such_field', 400, [], None),
)
@ddt.unpack
def test_order_by(self, field, status, names, expected_ordering):
with skip_signal(
post_save,
receiver=course_team_post_save_callback,
sender=CourseTeam,
dispatch_uid='teams.signals.course_team_post_save_callback'
):
# Add 2 teams to "Nuclear Power", which previously had no teams.
CourseTeamFactory.create(
name=u'Nuclear Team 1', course_id=self.test_course_1.id, topic_id='topic_2'
)
CourseTeamFactory.create(
name=u'Nuclear Team 2', course_id=self.test_course_1.id, topic_id='topic_2'
)
data = {'course_id': self.test_course_1.id}
if field:
data['order_by'] = field
topics = self.get_topics_list(status, data)
if status == 200:
self.assertEqual(names, [topic['name'] for topic in topics['results']])
self.assertEqual(topics['sort_order'], expected_ordering)
def test_order_by_team_count_secondary(self):
"""
        Ensure that the secondary (alphabetical) sort works across pagination
        boundaries when the primary sort is team_count.
"""
with skip_signal(
post_save,
receiver=course_team_post_save_callback,
sender=CourseTeam,
dispatch_uid='teams.signals.course_team_post_save_callback'
):
# Add 2 teams to "Wind Power", which previously had no teams.
CourseTeamFactory.create(
name=u'Wind Team 1', course_id=self.test_course_1.id, topic_id='topic_1'
)
CourseTeamFactory.create(
name=u'Wind Team 2', course_id=self.test_course_1.id, topic_id='topic_1'
)
topics = self.get_topics_list(data={
'course_id': self.test_course_1.id,
'page_size': 2,
'page': 1,
'order_by': 'team_count'
})
self.assertEqual(["Wind Power", u'Sólar power'], [topic['name'] for topic in topics['results']])
topics = self.get_topics_list(data={
'course_id': self.test_course_1.id,
'page_size': 2,
'page': 2,
'order_by': 'team_count'
})
self.assertEqual(["Coal Power", "Nuclear Power"], [topic['name'] for topic in topics['results']])
def test_pagination(self):
response = self.get_topics_list(data={
'course_id': self.test_course_1.id,
'page_size': 2,
})
self.assertEqual(2, len(response['results']))
self.assertIn('next', response)
self.assertIn('previous', response)
self.assertIsNone(response['previous'])
self.assertIsNotNone(response['next'])
def test_default_ordering(self):
response = self.get_topics_list(data={'course_id': self.test_course_1.id})
self.assertEqual(response['sort_order'], 'name')
def test_team_count(self):
"""Test that team_count is included for each topic"""
response = self.get_topics_list(data={'course_id': self.test_course_1.id})
for topic in response['results']:
self.assertIn('team_count', topic)
if topic['id'] == u'topic_0':
self.assertEqual(topic['team_count'], 1)
else:
self.assertEqual(topic['team_count'], 0)
@ddt.ddt
class TestDetailTopicAPI(TeamAPITestCase):
"""Test cases for the topic detail endpoint."""
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 403),
('student_enrolled', 200),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
topic = self.get_topic_detail('topic_0', self.test_course_1.id, status, user=user)
if status == 200:
for field in ('id', 'name', 'description'):
self.assertIn(field, topic)
@ddt.data('A+BOGUS+COURSE', 'A/BOGUS/COURSE')
def test_invalid_course_id(self, course_id):
self.get_topic_detail('topic_0', course_id, 404)
def test_invalid_topic_id(self):
self.get_topic_detail('no_such_topic', self.test_course_1.id, 404)
def test_team_count(self):
"""Test that team_count is included with a topic"""
topic = self.get_topic_detail(topic_id='topic_0', course_id=self.test_course_1.id)
self.assertEqual(topic['team_count'], 1)
topic = self.get_topic_detail(topic_id='topic_1', course_id=self.test_course_1.id)
self.assertEqual(topic['team_count'], 0)
@ddt.ddt
class TestListMembershipAPI(TeamAPITestCase):
"""Test cases for the membership list endpoint."""
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 404),
('student_enrolled', 200),
('student_enrolled_both_courses_other_team', 200),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
membership = self.get_membership_list(status, {'team_id': self.solar_team.team_id}, user=user)
if status == 200:
self.assertEqual(membership['count'], 1)
self.assertEqual(membership['results'][0]['user']['username'], self.users['student_enrolled'].username)
@ddt.data(
(None, 401, False),
('student_inactive', 401, False),
('student_unenrolled', 200, False),
('student_enrolled', 200, True),
('student_enrolled_both_courses_other_team', 200, True),
('staff', 200, True),
('course_staff', 200, True),
('community_ta', 200, True),
)
@ddt.unpack
def test_access_by_username(self, user, status, has_content):
membership = self.get_membership_list(status, {'username': self.users['student_enrolled'].username}, user=user)
if status == 200:
if has_content:
self.assertEqual(membership['count'], 1)
self.assertEqual(membership['results'][0]['team']['team_id'], self.solar_team.team_id)
else:
self.assertEqual(membership['count'], 0)
@ddt.data(
('student_enrolled_both_courses_other_team', 'TestX/TS101/Test_Course', 200, 'Nuclear Team'),
('student_enrolled_both_courses_other_team', 'MIT/6.002x/Circuits', 200, 'Another Team'),
('student_enrolled', 'TestX/TS101/Test_Course', 200, u'Sólar team'),
('student_enrolled', 'MIT/6.002x/Circuits', 400, ''),
)
@ddt.unpack
def test_course_filter_with_username(self, user, course_id, status, team_name):
membership = self.get_membership_list(
status,
{
                'username': self.users[user].username,
'course_id': course_id
},
user=user
)
if status == 200:
self.assertEqual(membership['count'], 1)
self.assertEqual(membership['results'][0]['team']['team_id'], self.test_team_name_id_map[team_name].team_id)
@ddt.data(
('TestX/TS101/Test_Course', 200),
('MIT/6.002x/Circuits', 400),
)
@ddt.unpack
def test_course_filter_with_team_id(self, course_id, status):
membership = self.get_membership_list(status, {'team_id': self.solar_team.team_id, 'course_id': course_id})
if status == 200:
self.assertEqual(membership['count'], 1)
self.assertEqual(membership['results'][0]['team']['team_id'], self.solar_team.team_id)
def test_bad_course_id(self):
self.get_membership_list(404, {'course_id': 'no_such_course'})
def test_no_username_or_team_id(self):
self.get_membership_list(400, {})
def test_bad_team_id(self):
self.get_membership_list(404, {'team_id': 'no_such_team'})
def test_expand_private_user(self):
        # Use the default user, which is already private because no year_of_birth is set
result = self.get_membership_list(200, {'team_id': self.solar_team.team_id, 'expand': 'user'})
self.verify_expanded_private_user(result['results'][0]['user'])
def test_expand_public_user(self):
result = self.get_membership_list(
200,
{'team_id': self.public_profile_team.team_id, 'expand': 'user'},
user='student_enrolled_public_profile'
)
self.verify_expanded_public_user(result['results'][0]['user'])
def test_expand_team(self):
result = self.get_membership_list(200, {'team_id': self.solar_team.team_id, 'expand': 'team'})
self.verify_expanded_team(result['results'][0]['team'])
@ddt.ddt
class TestCreateMembershipAPI(EventTestMixin, TeamAPITestCase):
"""Test cases for the membership creation endpoint."""
def setUp(self): # pylint: disable=arguments-differ
super(TestCreateMembershipAPI, self).setUp('lms.djangoapps.teams.utils.tracker')
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 404),
('student_enrolled_not_on_team', 200),
('student_enrolled', 404),
('student_enrolled_both_courses_other_team', 404),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
membership = self.post_create_membership(
status,
self.build_membership_data('student_enrolled_not_on_team', self.solar_team),
user=user
)
if status == 200:
self.assertEqual(membership['user']['username'], self.users['student_enrolled_not_on_team'].username)
self.assertEqual(membership['team']['team_id'], self.solar_team.team_id)
memberships = self.get_membership_list(200, {'team_id': self.solar_team.team_id})
self.assertEqual(memberships['count'], 2)
add_method = 'joined_from_team_view' if user == 'student_enrolled_not_on_team' else 'added_by_another_user'
self.assert_event_emitted(
'edx.team.learner_added',
team_id=self.solar_team.team_id,
user_id=self.users['student_enrolled_not_on_team'].id,
add_method=add_method
)
else:
self.assert_no_events_were_emitted()
def test_no_username(self):
response = self.post_create_membership(400, {'team_id': self.solar_team.team_id})
self.assertIn('username', json.loads(response.content)['field_errors'])
def test_no_team(self):
response = self.post_create_membership(400, {'username': self.users['student_enrolled_not_on_team'].username})
self.assertIn('team_id', json.loads(response.content)['field_errors'])
def test_bad_team(self):
self.post_create_membership(
404,
self.build_membership_data_raw(self.users['student_enrolled'].username, 'no_such_team')
)
def test_bad_username(self):
self.post_create_membership(
404,
self.build_membership_data_raw('no_such_user', self.solar_team.team_id),
user='staff'
)
@ddt.data('student_enrolled', 'staff', 'course_staff')
def test_join_twice(self, user):
response = self.post_create_membership(
400,
self.build_membership_data('student_enrolled', self.solar_team),
user=user
)
self.assertIn('already a member', json.loads(response.content)['developer_message'])
def test_join_second_team_in_course(self):
response = self.post_create_membership(
400,
self.build_membership_data('student_enrolled_both_courses_other_team', self.solar_team),
user='student_enrolled_both_courses_other_team'
)
self.assertIn('already a member', json.loads(response.content)['developer_message'])
@ddt.data('staff', 'course_staff')
def test_not_enrolled_in_team_course(self, user):
response = self.post_create_membership(
400,
self.build_membership_data('student_unenrolled', self.solar_team),
user=user
)
self.assertIn('not enrolled', json.loads(response.content)['developer_message'])
def test_over_max_team_size_in_course_2(self):
response = self.post_create_membership(
400,
self.build_membership_data('student_enrolled_other_course_not_on_team', self.another_team),
user='student_enrolled_other_course_not_on_team'
)
self.assertIn('full', json.loads(response.content)['developer_message'])
@ddt.ddt
class TestDetailMembershipAPI(TeamAPITestCase):
"""Test cases for the membership detail endpoint."""
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 404),
('student_enrolled_not_on_team', 200),
('student_enrolled', 200),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
self.get_membership_detail(
self.solar_team.team_id,
self.users['student_enrolled'].username,
status,
user=user
)
def test_bad_team(self):
self.get_membership_detail('no_such_team', self.users['student_enrolled'].username, 404)
def test_bad_username(self):
self.get_membership_detail(self.solar_team.team_id, 'no_such_user', 404)
def test_no_membership(self):
self.get_membership_detail(
self.solar_team.team_id,
self.users['student_enrolled_not_on_team'].username,
404
)
def test_expand_private_user(self):
        # Use the default user, which is already private because no year_of_birth is set
result = self.get_membership_detail(
self.solar_team.team_id,
self.users['student_enrolled'].username,
200,
{'expand': 'user'}
)
self.verify_expanded_private_user(result['user'])
def test_expand_public_user(self):
result = self.get_membership_detail(
self.public_profile_team.team_id,
self.users['student_enrolled_public_profile'].username,
200,
{'expand': 'user'},
user='student_enrolled_public_profile'
)
self.verify_expanded_public_user(result['user'])
def test_expand_team(self):
result = self.get_membership_detail(
self.solar_team.team_id,
self.users['student_enrolled'].username,
200,
{'expand': 'team'}
)
self.verify_expanded_team(result['team'])
@ddt.ddt
class TestDeleteMembershipAPI(EventTestMixin, TeamAPITestCase):
"""Test cases for the membership deletion endpoint."""
def setUp(self): # pylint: disable=arguments-differ
super(TestDeleteMembershipAPI, self).setUp('lms.djangoapps.teams.utils.tracker')
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 404),
('student_enrolled_not_on_team', 404),
('student_enrolled', 204),
('staff', 204),
('course_staff', 204),
('community_ta', 204),
)
@ddt.unpack
def test_access(self, user, status):
self.delete_membership(
self.solar_team.team_id,
self.users['student_enrolled'].username,
status,
user=user
)
if status == 204:
self.assert_event_emitted(
'edx.team.learner_removed',
team_id=self.solar_team.team_id,
user_id=self.users['student_enrolled'].id,
remove_method='removed_by_admin'
)
else:
self.assert_no_events_were_emitted()
def test_leave_team(self):
"""
The key difference between this test and test_access above is that
removal via "Edit Membership" and "Leave Team" emit different events
despite hitting the same API endpoint, due to the 'admin' query string.
"""
url = reverse('team_membership_detail', args=[self.solar_team.team_id, self.users['student_enrolled'].username])
self.make_call(url, 204, 'delete', user='student_enrolled')
self.assert_event_emitted(
'edx.team.learner_removed',
team_id=self.solar_team.team_id,
user_id=self.users['student_enrolled'].id,
remove_method='self_removal'
)
def test_bad_team(self):
self.delete_membership('no_such_team', self.users['student_enrolled'].username, 404)
def test_bad_username(self):
self.delete_membership(self.solar_team.team_id, 'no_such_user', 404)
def test_missing_membership(self):
self.delete_membership(self.wind_team.team_id, self.users['student_enrolled'].username, 404)
class TestElasticSearchErrors(TeamAPITestCase):
"""Test that the Team API is robust to Elasticsearch connection errors."""
ES_ERROR = ConnectionError('N/A', 'connection error', {})
@patch.object(SearchEngine, 'get_search_engine', side_effect=ES_ERROR)
def test_list_teams(self, __):
"""Test that text searches return a 503 when Elasticsearch is down.
The endpoint should still return 200 when a search is not supplied."""
self.get_teams_list(
expected_status=503,
data={'course_id': self.test_course_1.id, 'text_search': 'zoinks'},
user='staff'
)
self.get_teams_list(
expected_status=200,
data={'course_id': self.test_course_1.id},
user='staff'
)
@patch.object(SearchEngine, 'get_search_engine', side_effect=ES_ERROR)
def test_create_team(self, __):
"""Test that team creation is robust to Elasticsearch errors."""
self.post_create_team(
expected_status=200,
data=self.build_team_data(name='zoinks'),
user='staff'
)
@patch.object(SearchEngine, 'get_search_engine', side_effect=ES_ERROR)
def test_delete_team(self, __):
"""Test that team deletion is robust to Elasticsearch errors."""
self.delete_team(self.wind_team.team_id, 204, user='staff')
@patch.object(SearchEngine, 'get_search_engine', side_effect=ES_ERROR)
def test_patch_team(self, __):
"""Test that team updates are robust to Elasticsearch errors."""
self.patch_team_detail(
self.wind_team.team_id,
200,
data={'description': 'new description'},
user='staff'
)
|
mhbu50/erpnext
|
refs/heads/develop
|
erpnext/stock/report/stock_ageing/__init__.py
|
12133432
| |
crcresearch/osf.io
|
refs/heads/develop
|
addons/twofactor/migrations/__init__.py
|
12133432
| |
cousteaulecommandant/youtube-dl
|
refs/heads/master
|
test/__init__.py
|
12133432
| |
gogogo/gogogo-hk
|
refs/heads/master
|
gogogo/__init__.py
|
12133432
| |
sgput/portfolio-wptheme
|
refs/heads/master
|
node_modules/node-gyp/gyp/pylib/gyp/generator/ninja_test.py
|
1843
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the ninja.py file. """
import gyp.generator.ninja as ninja
import unittest
import StringIO
import sys
import TestCommon
class TestPrefixesAndSuffixes(unittest.TestCase):
def test_BinaryNamesWindows(self):
# These cannot run on non-Windows as they require a VS installation to
# correctly handle variable expansion.
if sys.platform.startswith('win'):
writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
'build.ninja', 'win')
spec = { 'target_name': 'wee' }
self.assertTrue(writer.ComputeOutputFileName(spec, 'executable').
endswith('.exe'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
endswith('.dll'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
endswith('.lib'))
def test_BinaryNamesLinux(self):
writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
'build.ninja', 'linux')
spec = { 'target_name': 'wee' }
self.assertTrue('.' not in writer.ComputeOutputFileName(spec,
'executable'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
startswith('lib'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
startswith('lib'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
endswith('.so'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
endswith('.a'))
if __name__ == '__main__':
unittest.main()
|
rickerc/nova_audit
|
refs/heads/cis-havana-staging
|
nova/virt/vmwareapi/network_util.py
|
11
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility functions for ESX Networking.
"""
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
LOG = logging.getLogger(__name__)
def get_network_with_the_name(session, network_name="vmnet0", cluster=None):
"""
Gets reference to the network whose name is passed as the
argument.
"""
host = vm_util.get_host_ref(session, cluster)
if cluster is not None:
vm_networks_ret = session._call_method(vim_util,
"get_dynamic_property", cluster,
"ClusterComputeResource",
"network")
else:
vm_networks_ret = session._call_method(vim_util,
"get_dynamic_property", host,
"HostSystem", "network")
    # Meaning there are no networks on the host: suds returns an empty string
    # for the property rather than an empty list of ManagedObjectReferences.
if not vm_networks_ret:
return None
vm_networks = vm_networks_ret.ManagedObjectReference
network_obj = {}
LOG.debug(vm_networks)
for network in vm_networks:
# Get network properties
if network._type == 'DistributedVirtualPortgroup':
props = session._call_method(vim_util,
"get_dynamic_property", network,
"DistributedVirtualPortgroup", "config")
# NOTE(asomya): This only works on ESXi if the port binding is
# set to ephemeral
if props.name == network_name:
network_obj['type'] = 'DistributedVirtualPortgroup'
network_obj['dvpg'] = props.key
dvs_props = session._call_method(vim_util,
"get_dynamic_property",
props.distributedVirtualSwitch,
"VmwareDistributedVirtualSwitch", "uuid")
network_obj['dvsw'] = dvs_props
else:
props = session._call_method(vim_util,
"get_dynamic_property", network,
"Network", "summary.name")
if props == network_name:
network_obj['type'] = 'Network'
network_obj['name'] = network_name
    if network_obj:
return network_obj
def get_vswitch_for_vlan_interface(session, vlan_interface, cluster=None):
"""
Gets the vswitch associated with the physical network adapter
with the name supplied.
"""
    # Get the list of vSwitches on the Host System
host_mor = vm_util.get_host_ref(session, cluster)
vswitches_ret = session._call_method(vim_util,
"get_dynamic_property", host_mor,
"HostSystem", "config.network.vswitch")
    # Meaning there are no vSwitches on the host. This shouldn't happen, but
    # we check defensively.
if not vswitches_ret:
return
vswitches = vswitches_ret.HostVirtualSwitch
# Get the vSwitch associated with the network adapter
for elem in vswitches:
try:
for nic_elem in elem.pnic:
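                # pnic entries are managed object reference strings (e.g.
                # 'key-vim.host.PhysicalNic-vmnic0'), so the segment after the
                # last '-' carries the device name being matched.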
if str(nic_elem).split('-')[-1].find(vlan_interface) != -1:
return elem.name
        # Catching AttributeError as a vSwitch may not be associated with a
        # physical NIC.
except AttributeError:
pass
def check_if_vlan_interface_exists(session, vlan_interface, cluster=None):
"""Checks if the vlan_interface exists on the esx host."""
host_mor = vm_util.get_host_ref(session, cluster)
physical_nics_ret = session._call_method(vim_util,
"get_dynamic_property", host_mor,
"HostSystem", "config.network.pnic")
# Meaning there are no physical nics on the host
if not physical_nics_ret:
return False
physical_nics = physical_nics_ret.PhysicalNic
for pnic in physical_nics:
if vlan_interface == pnic.device:
return True
return False
def get_vlanid_and_vswitch_for_portgroup(session, pg_name, cluster=None):
"""Get the vlan id and vswicth associated with the port group."""
host_mor = vm_util.get_host_ref(session, cluster)
port_grps_on_host_ret = session._call_method(vim_util,
"get_dynamic_property", host_mor,
"HostSystem", "config.network.portgroup")
if not port_grps_on_host_ret:
msg = _("ESX SOAP server returned an empty port group "
"for the host system in its response")
LOG.error(msg)
raise exception.NovaException(msg)
port_grps_on_host = port_grps_on_host_ret.HostPortGroup
for p_gp in port_grps_on_host:
if p_gp.spec.name == pg_name:
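            # p_gp.vswitch is a reference string such as
            # 'key-vim.host.VirtualSwitch-vSwitch0'; keep only the switch name.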
p_grp_vswitch_name = p_gp.vswitch.split("-")[-1]
return p_gp.spec.vlanId, p_grp_vswitch_name
def create_port_group(session, pg_name, vswitch_name, vlan_id=0, cluster=None):
"""
Creates a port group on the host system with the vlan tags
supplied. VLAN id 0 means no vlan id association.
"""
client_factory = session._get_vim().client.factory
add_prt_grp_spec = vm_util.get_add_vswitch_port_group_spec(
client_factory,
vswitch_name,
pg_name,
vlan_id)
host_mor = vm_util.get_host_ref(session, cluster)
network_system_mor = session._call_method(vim_util,
"get_dynamic_property", host_mor,
"HostSystem", "configManager.networkSystem")
LOG.debug(_("Creating Port Group with name %s on "
"the ESX host") % pg_name)
try:
session._call_method(session._get_vim(),
"AddPortGroup", network_system_mor,
portgrp=add_prt_grp_spec)
except error_util.VimFaultException as exc:
# There can be a race condition when two instances try
# adding port groups at the same time. One succeeds, then
# the other one will get an exception. Since we are
# concerned with the port group being created, which is done
# by the other call, we can ignore the exception.
if error_util.FAULT_ALREADY_EXISTS not in exc.fault_list:
raise exception.NovaException(exc)
LOG.debug(_("Created Port Group with name %s on "
"the ESX host") % pg_name)
|
dfalt974/SickRage
|
refs/heads/master
|
lib/pbr/extra_files.py
|
145
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from distutils import errors
import os
_extra_files = []
def get_extra_files():
global _extra_files
return _extra_files
def set_extra_files(extra_files):
# Let's do a sanity check
for filename in extra_files:
if not os.path.exists(filename):
raise errors.DistutilsFileError(
'%s from the extra_files option in setup.cfg does not '
'exist' % filename)
global _extra_files
_extra_files[:] = extra_files[:]
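# Illustrative usage of this module's accessors (a minimal sketch; the paths
# must exist on disk, and the surrounding pbr machinery normally supplies the
# list from setup.cfg):
#
#     set_extra_files(['CHANGELOG'])
#     assert get_extra_files() == ['CHANGELOG']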
|