repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
FreeOpcUa/opcua-modeler | release.py | 4 | 1112 | import re
import os
def bump_version():
    """Bump the patch number of the version declared in setup.py.

    Reads setup.py, proposes ``<major>.<minor>.<patch+1>``, lets the
    user type an alternative version (empty input accepts the
    proposal), writes setup.py back and returns the new version string.

    Raises:
        ValueError: if no ``version="x.y.z",`` assignment is found.
    """
    with open("setup.py") as f:
        s = f.read()
    m = re.search(r'version="(.*)\.(.*)\.(.*)",', s)
    if m is None:
        # Fail with a clear message instead of an AttributeError on None.
        raise ValueError('could not find version="x.y.z" in setup.py')
    v1, v2, v3 = m.groups()
    oldv = "{}.{}.{}".format(v1, v2, v3)
    newv = "{}.{}.{}".format(v1, v2, str(int(v3) + 1))
    print("Current version is: {}, write new version, ctrl-c to exit".format(oldv))
    ans = input(newv)
    if ans:
        newv = ans
    # Replace only the version assignment itself, so other occurrences
    # of the old version string elsewhere in setup.py are untouched.
    s = s.replace('version="{}"'.format(oldv), 'version="{}"'.format(newv))
    with open("setup.py", "w") as f:
        f.write(s)
    return newv
def release():
    """Interactively bump the version, then commit, tag, push and upload.

    Each step asks for confirmation; an answer other than the default
    ("", "y" or "yes") aborts the remaining steps.
    """
    new_version = bump_version()

    if input("version bumped, commiting?(Y/n)") not in ("", "y", "yes"):
        return
    os.system("git add setup.py")
    os.system("git commit -m 'new release'")
    os.system("git tag {}".format(new_version))

    if input("change committed, push to server?(Y/n)") not in ("", "y", "yes"):
        return
    os.system("git push")
    os.system("git push --tags")

    if input("upload to pip?(Y/n)") not in ("", "y", "yes"):
        return
    os.system("python setup.py sdist upload")
# Allow running this release helper directly as a script.
if __name__ == "__main__":
    release()
| gpl-3.0 |
chjw8016/GreenOdoo7-haibao | openerp/addons/base_vat/__openerp__.py | 125 | 2928 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo module manifest for the base_vat addon.
{
    'name': 'VAT Number Validation',
    'version': '1.0',
    # Hidden: installed as a dependency, not picked from the apps list.
    'category': 'Hidden/Dependency',
    'description': """
VAT validation for Partner's VAT numbers.
=========================================
After installing this module, values entered in the VAT field of Partners will
be validated for all supported countries. The country is inferred from the
2-letter country code that prefixes the VAT number, e.g. ``BE0477472701``
will be validated using the Belgian rules.
There are two different levels of VAT number validation:
--------------------------------------------------------
* By default, a simple off-line check is performed using the known validation
rules for the country, usually a simple check digit. This is quick and
always available, but allows numbers that are perhaps not truly allocated,
or not valid anymore.
* When the "VAT VIES Check" option is enabled (in the configuration of the user's
Company), VAT numbers will be instead submitted to the online EU VIES
database, which will truly verify that the number is valid and currently
allocated to a EU company. This is a little bit slower than the simple
off-line check, requires an Internet connection, and may not be available
all the time. If the service is not available or does not support the
requested country (e.g. for non-EU countries), a simple check will be performed
instead.
Supported countries currently include EU countries, and a few non-EU countries
such as Chile, Colombia, Mexico, Norway or Russia. For unsupported countries,
only the country code will be validated.
""",
    'author': 'OpenERP SA',
    # VAT numbers live on partners, managed by the accounting app.
    'depends': ['account'],
    'website': 'http://www.openerp.com',
    'data': ['base_vat_view.xml'],
    'installable': True,
    # Never auto-installed; must be selected explicitly.
    'auto_install': False,
    'images': ['images/1_partner_vat.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| mit |
yxl/emscripten-calligra-mobile | 3rdparty/google-breakpad/src/tools/gyp/test/generator-output/gyptest-relocate.py | 74 | 1688 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that a project hierarchy created with the --generator-output=
option can be built even when it's relocated to a different path.
"""
import TestGyp
# Ninja and Android don't support --generator-output.
test = TestGyp.TestGyp(formats=['!ninja', '!android'])
test.writable(test.workpath('src'), False)
test.run_gyp('prog1.gyp',
'-Dset_symroot=1',
'--generator-output=' + test.workpath('gypfiles'),
chdir='src')
test.writable(test.workpath('src'), True)
test.relocate('src', 'relocate/src')
test.relocate('gypfiles', 'relocate/gypfiles')
test.writable(test.workpath('relocate/src'), False)
test.writable(test.workpath('relocate/src/build'), True)
test.writable(test.workpath('relocate/src/subdir2/build'), True)
test.writable(test.workpath('relocate/src/subdir3/build'), True)
test.build('prog1.gyp', test.ALL, chdir='relocate/gypfiles')
chdir = 'relocate/gypfiles'
expect = """\
Hello from %s
Hello from inc.h
Hello from inc1/include1.h
Hello from inc2/include2.h
Hello from inc3/include3.h
Hello from subdir2/deeper/deeper.h
"""
if test.format == 'xcode':
chdir = 'relocate/src'
test.run_built_executable('prog1', chdir=chdir, stdout=expect % 'prog1.c')
if test.format == 'xcode':
chdir = 'relocate/src/subdir2'
test.run_built_executable('prog2', chdir=chdir, stdout=expect % 'prog2.c')
if test.format == 'xcode':
chdir = 'relocate/src/subdir3'
test.run_built_executable('prog3', chdir=chdir, stdout=expect % 'prog3.c')
test.pass_test()
| gpl-2.0 |
touilleMan/mongoengine | tests/test_context_managers.py | 31 | 6351 | import sys
sys.path[0:0] = [""]
import unittest
from mongoengine import *
from mongoengine.connection import get_db
from mongoengine.context_managers import (switch_db, switch_collection,
no_sub_classes, no_dereference,
query_counter)
class ContextManagersTest(unittest.TestCase):
    """Integration tests for mongoengine's context managers.

    NOTE(review): these tests require a live MongoDB instance reachable
    under the default connection settings ('mongoenginetest' /
    'mongoenginetest2' databases) — confirm before running.
    """

    def test_switch_db_context_manager(self):
        # Documents saved inside switch_db() must go to the alternate
        # database and the original binding must be restored on exit.
        connect('mongoenginetest')
        register_connection('testdb-1', 'mongoenginetest2')

        class Group(Document):
            name = StringField()

        Group.drop_collection()

        Group(name="hello - default").save()
        self.assertEqual(1, Group.objects.count())

        with switch_db(Group, 'testdb-1') as Group:
            # Inside the block, queries hit the 'testdb-1' database.
            self.assertEqual(0, Group.objects.count())
            Group(name="hello").save()
            self.assertEqual(1, Group.objects.count())
            Group.drop_collection()
            self.assertEqual(0, Group.objects.count())

        # Back on the default database: the original document survives.
        self.assertEqual(1, Group.objects.count())

    def test_switch_collection_context_manager(self):
        # Same idea as above but switching the collection, not the db.
        connect('mongoenginetest')
        register_connection('testdb-1', 'mongoenginetest2')

        class Group(Document):
            name = StringField()

        Group.drop_collection()
        with switch_collection(Group, 'group1') as Group:
            Group.drop_collection()

        Group(name="hello - group").save()
        self.assertEqual(1, Group.objects.count())

        with switch_collection(Group, 'group1') as Group:
            # Inside the block, queries hit the 'group1' collection.
            self.assertEqual(0, Group.objects.count())
            Group(name="hello - group1").save()
            self.assertEqual(1, Group.objects.count())
            Group.drop_collection()
            self.assertEqual(0, Group.objects.count())

        # Back on the default collection: original document survives.
        self.assertEqual(1, Group.objects.count())

    def test_no_dereference_context_manager_object_id(self):
        """Ensure that DBRef items in ListFields aren't dereferenced.
        """
        connect('mongoenginetest')

        class User(Document):
            name = StringField()

        class Group(Document):
            ref = ReferenceField(User, dbref=False)
            generic = GenericReferenceField()
            members = ListField(ReferenceField(User, dbref=False))

        User.drop_collection()
        Group.drop_collection()

        for i in xrange(1, 51):
            User(name='user %s' % i).save()

        user = User.objects.first()
        Group(ref=user, members=User.objects, generic=user).save()

        with no_dereference(Group) as NoDeRefGroup:
            # Only the class yielded by the manager has auto-dereference
            # disabled; the original class is untouched.
            self.assertTrue(Group._fields['members']._auto_dereference)
            self.assertFalse(NoDeRefGroup._fields['members']._auto_dereference)

        with no_dereference(Group) as Group:
            group = Group.objects.first()
            # References stay raw (not User instances) inside the block.
            self.assertTrue(all([not isinstance(m, User)
                                 for m in group.members]))
            self.assertFalse(isinstance(group.ref, User))
            self.assertFalse(isinstance(group.generic, User))

        # Outside the block, dereferencing works again.
        self.assertTrue(all([isinstance(m, User)
                             for m in group.members]))
        self.assertTrue(isinstance(group.ref, User))
        self.assertTrue(isinstance(group.generic, User))

    def test_no_dereference_context_manager_dbref(self):
        """Ensure that DBRef items in ListFields aren't dereferenced.
        """
        connect('mongoenginetest')

        class User(Document):
            name = StringField()

        class Group(Document):
            # Same scenario as above, but stored as real DBRefs.
            ref = ReferenceField(User, dbref=True)
            generic = GenericReferenceField()
            members = ListField(ReferenceField(User, dbref=True))

        User.drop_collection()
        Group.drop_collection()

        for i in xrange(1, 51):
            User(name='user %s' % i).save()

        user = User.objects.first()
        Group(ref=user, members=User.objects, generic=user).save()

        with no_dereference(Group) as NoDeRefGroup:
            self.assertTrue(Group._fields['members']._auto_dereference)
            self.assertFalse(NoDeRefGroup._fields['members']._auto_dereference)

        with no_dereference(Group) as Group:
            group = Group.objects.first()
            self.assertTrue(all([not isinstance(m, User)
                                 for m in group.members]))
            self.assertFalse(isinstance(group.ref, User))
            self.assertFalse(isinstance(group.generic, User))

        self.assertTrue(all([isinstance(m, User)
                             for m in group.members]))
        self.assertTrue(isinstance(group.ref, User))
        self.assertTrue(isinstance(group.generic, User))

    def test_no_sub_classes(self):
        # no_sub_classes() must restrict queries to the exact class,
        # excluding documents stored for its subclasses.
        class A(Document):
            x = IntField()
            y = IntField()
            meta = {'allow_inheritance': True}

        class B(A):
            z = IntField()

        class C(B):
            zz = IntField()

        A.drop_collection()

        A(x=10, y=20).save()
        A(x=15, y=30).save()
        B(x=20, y=40).save()
        B(x=30, y=50).save()
        C(x=40, y=60).save()

        # Without the manager, each count includes all subclasses.
        self.assertEqual(A.objects.count(), 5)
        self.assertEqual(B.objects.count(), 3)
        self.assertEqual(C.objects.count(), 1)

        with no_sub_classes(A) as A:
            self.assertEqual(A.objects.count(), 2)
            for obj in A.objects:
                self.assertEqual(obj.__class__, A)

        with no_sub_classes(B) as B:
            self.assertEqual(B.objects.count(), 2)
            for obj in B.objects:
                self.assertEqual(obj.__class__, B)

        with no_sub_classes(C) as C:
            self.assertEqual(C.objects.count(), 1)
            for obj in C.objects:
                self.assertEqual(obj.__class__, C)

        # Confirm context manager exit correctly
        self.assertEqual(A.objects.count(), 5)
        self.assertEqual(B.objects.count(), 3)
        self.assertEqual(C.objects.count(), 1)

    def test_query_counter(self):
        # query_counter() counts the queries issued inside its block.
        connect('mongoenginetest')
        db = get_db()
        db.test.find({})

        with query_counter() as q:
            self.assertEqual(0, q)

            for i in xrange(1, 51):
                db.test.find({}).count()

            self.assertEqual(50, q)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| mit |
zenodo/invenio | invenio/legacy/websubmit/functions/Create_Upload_Files_Interface.py | 13 | 23083 | # $Id: Revise_Files.py,v 1.37 2009/03/26 15:11:05 jerome Exp $
# This file is part of Invenio.
# Copyright (C) 2009, 2010, 2011, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebSubmit function - Displays a generic interface to upload, delete
and revise files.
To be used on par with Move_Uploaded_Files_to_Storage function:
- Create_Upload_Files_Interface records the actions performed by user.
- Move_Uploaded_Files_to_Storage execute the recorded actions.
NOTE:
=====
- Due to the way WebSubmit works, this function can only work when
positionned at step 1 in WebSubmit admin, and
Move_Uploaded_Files_to_Storage is at step 2
FIXME:
======
- One issue: if we allow deletion or renaming, we might lose track of
a bibdoc: someone adds X, renames X->Y, and adds again another file
with name X: when executing actions, we will add the second X, and
rename it to Y
-> need to go back in previous action when renaming... or check
that name has never been used..
"""
__revision__ = "$Id$"
import os
from invenio.config import \
CFG_SITE_LANG
from invenio.base.i18n import gettext_set_language, wash_language
from invenio.legacy.bibdocfile.managedocfiles import create_file_upload_interface
def Create_Upload_Files_Interface(parameters, curdir, form, user_info=None):
    """
    List files for revisions.

    You should use Move_Uploaded_Files_to_Storage.py function in your
    submission to apply the changes performed by users with this
    interface.

    @param parameters:(dictionary) - must contain:

      + maxsize: the max size allowed for uploaded files

      + minsize: the max size allowed for uploaded files

      + doctypes: the list of doctypes (like 'Main' or 'Additional')
                  and their description that users can choose from
                  when adding new files.
                   - When no value is provided, users cannot add new
                     file (they can only revise/delete/add format)
                   - When a single value is given, it is used as
                     default doctype for all new documents
                  Eg:
                    main=Main document|additional=Figure, schema. etc
                  ('=' separates doctype and description
                   '|' separates each doctype/description group)

      + restrictions: the list of restrictions (like 'Restricted' or
                      'No Restriction') and their description that
                      users can choose from when adding/revising
                      files. Restrictions can then be configured at
                      the level of WebAccess.
                       - When no value is provided, no restriction is
                         applied
                       - When a single value is given, it is used as
                         default resctriction for all documents.
                       - The first value of the list is used as default
                         restriction if the user if not given the
                         choice of the restriction. CHOOSE THE ORDER!
                      Eg:
                        =No restriction|restr=Restricted
                      ('=' separates restriction and description
                       '|' separates each restriction/description group)

      + canDeleteDoctypes: the list of doctypes that users are
                           allowed to delete.
                           Eg:
                             Main|Additional
                           ('|' separated values)
                           Use '*' for all doctypes

      + canReviseDoctypes: the list of doctypes that users are
                           allowed to revise
                           Eg:
                             Main|Additional
                           ('|' separated values)
                           Use '*' for all doctypes

      + canDescribeDoctypes: the list of doctypes that users are
                             allowed to describe
                             Eg:
                               Main|Additional
                             ('|' separated values)
                             Use '*' for all doctypes

      + canCommentDoctypes: the list of doctypes that users are
                            allowed to comment
                            Eg:
                              Main|Additional
                            ('|' separated values)
                            Use '*' for all doctypes

      + canKeepDoctypes: the list of doctypes for which users can
                         choose to keep previous versions visible when
                         revising a file (i.e. 'Keep previous version'
                         checkbox). See also parameter 'keepDefault'.
                         Note that this parameter is ~ignored when
                         revising the attributes of a file (comment,
                         description) without uploading a new
                         file. See also parameter
                         Move_Uploaded_Files_to_Storage.forceFileRevision
                         Eg:
                           Main|Additional
                         ('|' separated values)
                         Use '*' for all doctypes

      + canAddFormatDoctypes: the list of doctypes for which users can
                              add new formats. If there is no value,
                              then no 'add format' link nor warning
                              about losing old formats are displayed.
                              Eg:
                                Main|Additional
                              ('|' separated values)
                              Use '*' for all doctypes

      + canRestrictDoctypes: the list of doctypes for which users can
                             choose the access restrictions when adding or
                             revising a file. If no value is given:
                              - no restriction is applied if none is defined
                                in the 'restrictions' parameter.
                              - else the *first* value of the 'restrictions'
                                parameter is used as default restriction.
                             Eg:
                               Main|Additional
                             ('|' separated values)
                             Use '*' for all doctypes

      + canRenameDoctypes: the list of doctypes that users are allowed
                           to rename (when revising)
                           Eg:
                             Main|Additional
                           ('|' separated values)
                           Use '*' for all doctypes

      + canNameNewFiles: if user can choose the name of the files they
                         upload (1) or not (0)

      + defaultFilenameDoctypes: Rename uploaded files to admin-chosen
                                 values. List here the the files in
                                 current submission directory that
                                 contain the names to use for each doctype.
                                 Eg:
                                   Main=RN|Additional=additional_filename
                                 ('=' separates doctype and file in curdir
                                  '|' separates each doctype/file group).
                                 If the same doctype is submitted
                                 several times, a"-%i" suffix is added
                                 to the name defined in the file.
                                 The default filenames are overriden
                                 by user-chosen names if you allow
                                 'canNameNewFiles' or
                                 'canRenameDoctypes'.

      + maxFilesDoctypes: the maximum number of files that users can
                          upload for each doctype.
                          Eg:
                            Main=1|Additional=2
                          ('|' separated values)
                          Do not specify the doctype here to have an
                          unlimited number of files for a given
                          doctype.

      + createRelatedFormats: if uploaded files get converted to
                              whatever format we can (1) or not (0)

      + deferRelatedFormatsCreation: if creation of related format is
                                     scheduled to be run later,
                                     offline (1, default) or
                                     immediately/online just after the
                                     user has uploaded the file
                                     (0). Setting immediate conversion
                                     enables workflows to process the
                                     created files in following
                                     functions, but "blocks" the user.

      + keepDefault: the default behaviour for keeping or not previous
                     version of files when users cannot choose (no
                     value in canKeepDoctypes): keep (1) or not (0)
                     Note that this parameter is ignored when revising
                     the attributes of a file (comment, description)
                     without uploading a new file. See also parameter
                     Move_Uploaded_Files_to_Storage.forceFileRevision

      + showLinks: if we display links to files (1) when possible or
                   not (0)

      + fileLabel: the label for the file field

      + filenameLabel: the label for the file name field

      + descriptionLabel: the label for the description field

      + commentLabel: the label for the comments field

      + restrictionLabel: the label in front of the restrictions list

      + startDoc: the name of a file in curdir that contains some
                  text/markup to be printed *before* the file revision
                  box

      + endDoc: the name of a file in curdir that contains some
                text/markup to be printed *after* the file revision
                box
    """
    global sysno
    ln = wash_language(form['ln'])
    _ = gettext_set_language(ln)
    out = ''

    ## Fetch parameters defined for this function
    (minsize, maxsize, doctypes_and_desc, doctypes,
     can_delete_doctypes, can_revise_doctypes, can_describe_doctypes,
     can_comment_doctypes, can_keep_doctypes, can_rename_doctypes,
     can_add_format_to_doctypes, createRelatedFormats_p,
     can_name_new_files, keep_default, show_links, file_label,
     filename_label, description_label, comment_label, startDoc,
     endDoc, restrictions_and_desc, can_restrict_doctypes,
     restriction_label, doctypes_to_default_filename,
     max_files_for_doctype, deferRelatedFormatsCreation_p) = \
     wash_function_parameters(parameters, curdir, ln)

    try:
        recid = int(sysno)
    except (NameError, TypeError, ValueError):
        # sysno may be unset (NameError), None (TypeError) or a
        # non-numeric string (ValueError): in all these cases the
        # record does not exist yet.  A bare 'except' previously
        # swallowed *any* error here, hiding genuine bugs.
        recid = None

    out += '<center>'
    out += startDoc
    out += create_file_upload_interface(recid,
                                        form=form,
                                        print_outside_form_tag=True,
                                        print_envelope=True,
                                        include_headers=True,
                                        ln=ln,
                                        minsize=minsize, maxsize=maxsize,
                                        doctypes_and_desc=doctypes_and_desc,
                                        can_delete_doctypes=can_delete_doctypes,
                                        can_revise_doctypes=can_revise_doctypes,
                                        can_describe_doctypes=can_describe_doctypes,
                                        can_comment_doctypes=can_comment_doctypes,
                                        can_keep_doctypes=can_keep_doctypes,
                                        can_rename_doctypes=can_rename_doctypes,
                                        can_add_format_to_doctypes=can_add_format_to_doctypes,
                                        create_related_formats=createRelatedFormats_p,
                                        can_name_new_files=can_name_new_files,
                                        keep_default=keep_default, show_links=show_links,
                                        file_label=file_label, filename_label=filename_label,
                                        description_label=description_label, comment_label=comment_label,
                                        restrictions_and_desc=restrictions_and_desc,
                                        can_restrict_doctypes=can_restrict_doctypes,
                                        restriction_label=restriction_label,
                                        doctypes_to_default_filename=doctypes_to_default_filename,
                                        max_files_for_doctype=max_files_for_doctype,
                                        sbm_indir=None, sbm_doctype=None, sbm_access=None,
                                        uid=None, sbm_curdir=curdir,
                                        defer_related_formats_creation=deferRelatedFormatsCreation_p)[1]
    out += endDoc
    out += '</center>'
    return out
def wash_function_parameters(parameters, curdir, ln=CFG_SITE_LANG):
    """
    Returns the functions (admin-defined) parameters washed and
    initialized properly, as a tuple:

    Parameters:

        check Create_Upload_Files_Interface(..) docstring

    Returns:

        tuple (minsize, maxsize, doctypes_and_desc, doctypes,
        can_delete_doctypes, can_revise_doctypes,
        can_describe_doctypes can_comment_doctypes, can_keep_doctypes,
        can_rename_doctypes, can_add_format_to_doctypes,
        createRelatedFormats_p, can_name_new_files, keep_default,
        show_links, file_label, filename_label, description_label,
        comment_label, startDoc, endDoc, access_restrictions_and_desc,
        can_restrict_doctypes, restriction_label,
        doctypes_to_default_filename, max_files_for_doctype,
        deferRelatedFormatsCreation_p)
    """
    _ = gettext_set_language(ln)

    # The min and max files sizes that users can upload
    minsize = parameters['minsize']
    maxsize = parameters['maxsize']

    # The list of doctypes + description that users can select when
    # adding new files.  If there are no values, then user cannot add
    # new files.  '|' is used to separate doctypes groups, and '=' to
    # separate doctype and description. Eg:
    # main=Main document|additional=Figure, schema. etc
    doctypes_and_desc = [doctype.strip().split("=") for doctype \
                         in parameters['doctypes'].split('|') \
                         if doctype.strip() != '']
    doctypes = [doctype for (doctype, desc) in doctypes_and_desc]
    doctypes_and_desc = [[doctype, _(desc)] for \
                         (doctype, desc) in doctypes_and_desc]

    # The list of doctypes users are allowed to delete
    # (list of values separated by "|")
    can_delete_doctypes = [doctype.strip() for doctype \
                           in parameters['canDeleteDoctypes'].split('|') \
                           if doctype.strip() != '']

    # The list of doctypes users are allowed to revise
    # (list of values separated by "|")
    can_revise_doctypes = [doctype.strip() for doctype \
                           in parameters['canReviseDoctypes'].split('|') \
                           if doctype.strip() != '']

    # The list of doctypes users are allowed to describe
    # (list of values separated by "|")
    can_describe_doctypes = [doctype.strip() for doctype \
                             in parameters['canDescribeDoctypes'].split('|') \
                             if doctype.strip() != '']

    # The list of doctypes users are allowed to comment
    # (list of values separated by "|")
    can_comment_doctypes = [doctype.strip() for doctype \
                            in parameters['canCommentDoctypes'].split('|') \
                            if doctype.strip() != '']

    # The list of doctypes for which users are allowed to decide
    # if they want to keep old files or not when revising
    # (list of values separated by "|")
    can_keep_doctypes = [doctype.strip() for doctype \
                         in parameters['canKeepDoctypes'].split('|') \
                         if doctype.strip() != '']

    # The list of doctypes users are allowed to rename
    # (list of values separated by "|")
    can_rename_doctypes = [doctype.strip() for doctype \
                           in parameters['canRenameDoctypes'].split('|') \
                           if doctype.strip() != '']

    # The mapping from doctype to default filename.
    # '|' is used to separate doctypes groups, and '=' to
    # separate doctype and file in curdir where the default name is. Eg:
    # main=main_filename|additional=additional_filename. etc
    default_doctypes_and_curdir_files = [doctype.strip().split("=") for doctype \
                                         in parameters['defaultFilenameDoctypes'].split('|') \
                                         if doctype.strip() != '']
    doctypes_to_default_filename = {}
    for doctype, curdir_file in default_doctypes_and_curdir_files:
        default_filename = read_file(curdir, curdir_file)
        if default_filename:
            doctypes_to_default_filename[doctype] = os.path.basename(default_filename)

    # The maximum number of files that can be uploaded for each doctype
    # Eg:
    # main=1|additional=3
    doctypes_and_max_files = [doctype.strip().split("=") for doctype \
                              in parameters['maxFilesDoctypes'].split('|') \
                              if doctype.strip() != '']
    max_files_for_doctype = {}
    for doctype, max_files in doctypes_and_max_files:
        if max_files.isdigit():
            max_files_for_doctype[doctype] = int(max_files)

    # The list of doctypes for which users are allowed to add new formats
    # (list of values separated by "|")
    can_add_format_to_doctypes = [doctype.strip() for doctype \
                                  in parameters['canAddFormatDoctypes'].split('|') \
                                  if doctype.strip() != '']

    # The list of access restrictions + description that users can
    # select when adding new files.  If there are no values, no
    # restriction is applied .  '|' is used to separate access
    # restrictions groups, and '=' to separate access restriction and
    # description. Eg: main=Main document|additional=Figure,
    # schema. etc
    access_restrictions_and_desc = [access.strip().split("=") for access \
                                    in parameters['restrictions'].split('|') \
                                    if access.strip() != '']
    access_restrictions_and_desc = [[access, _(desc)] for \
                                    (access, desc) in access_restrictions_and_desc]

    # The list of doctypes users are allowed to restrict
    # (list of values separated by "|")
    can_restrict_doctypes = [restriction.strip() for restriction \
                             in parameters['canRestrictDoctypes'].split('|') \
                             if restriction.strip() != '']

    # If we should create additional formats when applicable (1) or
    # not (0)
    # NOTE: all the 'except ValueError' clauses below no longer bind
    # the exception to an unused variable; this also fixes the
    # Python-2-only 'except ValueError, e' comma syntax that was used
    # for deferRelatedFormatsCreation, inconsistently with the rest of
    # this function.
    try:
        createRelatedFormats_p = bool(int(parameters['createRelatedFormats']))
    except ValueError:
        createRelatedFormats_p = False

    # If we should create additional formats right now (1) or
    # later (0)
    try:
        deferRelatedFormatsCreation_p = bool(int(parameters['deferRelatedFormatsCreation']))
    except ValueError:
        deferRelatedFormatsCreation_p = True

    # If users can name the files they add
    # Value should be 0 (Cannot rename) or 1 (Can rename)
    try:
        can_name_new_files = int(parameters['canNameNewFiles'])
    except ValueError:
        can_name_new_files = False

    # The default behaviour wrt keeping previous files or not.
    # 0 = do not keep, 1 = keep
    try:
        keep_default = int(parameters['keepDefault'])
    except ValueError:
        keep_default = False

    # If we display links to files (1) or not (0)
    try:
        show_links = int(parameters['showLinks'])
    except ValueError:
        show_links = True

    file_label = parameters['fileLabel']
    if file_label == "":
        file_label = _('Choose a file')

    filename_label = parameters['filenameLabel']
    if filename_label == "":
        filename_label = _('Name')

    description_label = parameters['descriptionLabel']
    if description_label == "":
        description_label = _('Description')

    comment_label = parameters['commentLabel']
    if comment_label == "":
        comment_label = _('Comment')

    restriction_label = parameters['restrictionLabel']
    if restriction_label == "":
        restriction_label = _('Access')

    startDoc = parameters['startDoc']
    endDoc = parameters['endDoc']
    prefix = read_file(curdir, startDoc)
    if prefix is None:
        prefix = ""
    suffix = read_file(curdir, endDoc)
    if suffix is None:
        suffix = ""

    return (minsize, maxsize, doctypes_and_desc, doctypes,
            can_delete_doctypes, can_revise_doctypes,
            can_describe_doctypes, can_comment_doctypes,
            can_keep_doctypes, can_rename_doctypes,
            can_add_format_to_doctypes, createRelatedFormats_p,
            can_name_new_files, keep_default, show_links, file_label,
            filename_label, description_label, comment_label,
            prefix, suffix, access_restrictions_and_desc,
            can_restrict_doctypes, restriction_label,
            doctypes_to_default_filename, max_files_for_doctype,
            deferRelatedFormatsCreation_p)
def read_file(curdir, filename):
    """
    Reads a file in curdir.
    Returns None if does not exist, cannot be read, or if file is not
    really in curdir
    """
    try:
        base = os.path.abspath(curdir)
        file_path = os.path.abspath(os.path.join(curdir, filename))
        # Compare against the directory plus a path separator so that
        # a sibling such as '/tmp/foo-evil' cannot pass a check for
        # curdir '/tmp/foo' (a plain startswith() would accept it).
        if not file_path.startswith(base + os.sep):
            return None
        # 'with open(...)' replaces the Python-2-only file() builtin
        # and guarantees the descriptor is closed even on read errors.
        with open(file_path, 'r') as file_desc:
            content = file_desc.read()
    except (IOError, OSError):
        # Missing, unreadable, or not a regular file: report None.
        content = None
    return content
| gpl-2.0 |
nicferrier/pyproxyfs | src/pyproxyfs/__init__.py | 1 | 6159 | # pyproxyfs - a very lightweight proxy filesystem class
# Copyright (C) 2010 Nic Ferrier <nic@ferrier.me.uk>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import os.path
class Filesystem(object):
    """Base filesystem interface also the base implementation.

    The implementation talks very simply to the normal Python functions.
    """
    def rename(self, oldpath, newpath):
        # Delegate directly to the real filesystem.
        os.rename(oldpath, newpath)

    def remove(self, path):
        os.remove(path)

    def listdir(self, path):
        return os.listdir(path)

    def open(self, path, mode="r"):
        # Same signature/semantics as the builtin open().
        return open(path, mode)

    def iglob(self, path):
        # Imported lazily so importing this module stays cheap.
        import glob
        return glob.iglob(path)

    def glob(self, path):
        # Materialize iglob's iterator into a list.
        return list(self.iglob(path))

    def exists(self, path):
        return os.path.exists(path)

    def isdir(self, path):
        return os.path.isdir(path)
def _mergedict(a, b):
"""Recusively merge the 2 dicts.
Destructive on argument 'a'.
"""
for p, d1 in b.items():
if p in a:
if not isinstance(d1, dict):
continue
_mergedict(a[p], d1)
else:
a[p] = d1
return a
class TestFS(Filesystem):
def __init__(self, data):
super(TestFS, self).__init__()
# 'paths' is pretty much what is passed in
self.paths = data
# 'files' is the decomposed paths -> json structure
# eg: "/a/b" is stored as a key "a" with a dict containing a key "b":
# {"a": {"b": "filecontent"}}
self.files = {}
# Make the path: object into a nested dict setup
for name,data in data.items():
paths = name.split("/")
if paths[0] == "":
paths = paths[1:]
d = {}
d[paths[-1]] = data
for p in reversed(paths[:-1]):
d = { p: d }
_mergedict(self.files, d)
def open(self, path, mode=None):
path = path.split("/")
if path[0] == "":
path = path[1:]
d = self.files
for p in path:
if not p:
continue
d = d[p]
obj = d
class grd():
def __enter__(self):
return self
def __exit__(self, type, values, traceback):
pass
def read(self):
return obj
def readline(self, size=-1):
if not getattr(self, "lineno", False):
setattr(self, "lineno", 0)
lines = obj.split("\n")
if self.lineno == len(lines):
return "\n"
if self.lineno > len(lines):
raise IOError()
line = lines[self.lineno]
self.lineno = self.lineno + 1
return "%s\n" % line
return grd()
def rename(self, old, new):
path = old.split("/")
if path[0] == "":
path = path[1:]
d = self.files
lastd = None
for p in path:
lastd = d
d = d[p]
del lastd[p]
obj = d
np = new.split("/")
if np[0] == "":
np = np[1:]
d = {}
d[np[-1]] = obj
for p in reversed(np[:-1]):
d = { p: d }
_mergedict(self.files, d)
return self
def remove(self, path):
"""Deletes just the end point"""
def _path_find(path_parts, fs):
for p,f in fs.items():
if p == path_parts[0]:
if len(path_parts) == 1:
del fs[p]
return
else:
return _path_find(path_parts[1:], f)
raise KeyError()
pt = path.split("/")
return _path_find(pt if pt[0] != "" else pt[1:], self.files)
def _listdir(self, path):
if path == ".":
for i in self.files:
yield i
else:
paths = path.split("/")
if paths[0] == "":
paths = paths[1:]
d = self.files
for p in paths:
d = d[p]
for i in d:
yield i
def listdir(self, path):
return list(self._listdir(path))
def iglob(self, path):
import fnmatch
for p in sorted(self.paths.keys()):
if fnmatch.fnmatch(p, path):
yield p
def _path(self, path):
"""Functional/recursive path finder.
Raises KeyError if the path is not found
"""
def _path_find(path_parts, fs):
for p,f in fs.items():
if p == path_parts[0]:
if len(path_parts) == 1:
return f
else:
return _path_find(path_parts[1:], f)
raise KeyError()
pt = path.split("/")
return _path_find(pt if pt[0] != "" else pt[1:], self.files)
def exists(self, path):
"""Functional (recursive) exists on the path structures"""
try:
self._path(path)
except KeyError:
return False
else:
return True
def isdir(self, path):
    """Is the path a directory?
    A path is a directory if it holds a dictionary.
    """
    if not self.exists(path):
        return False
    # A directory node is a dict; a file node is its content string.
    # Not sure this is the best attribute to check for.
    return hasattr(self._path(path), "keys")
# End
| gpl-3.0 |
trosa/forca | fcgihandler.py | 2 | 1504 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
This is a handler for lighttpd+fastcgi
This file has to be in the PYTHONPATH
Put something like this in the lighttpd.conf file:
server.port = 8000
server.bind = '127.0.0.1'
server.event-handler = 'freebsd-kqueue'
server.modules = ('mod_rewrite', 'mod_fastcgi')
server.error-handler-404 = '/test.fcgi'
server.document-root = '/somewhere/web2py'
server.errorlog = '/tmp/error.log'
fastcgi.server = ('.fcgi' =>
('localhost' =>
('min-procs' => 1,
'socket' => '/tmp/fcgi.sock'
)
)
)
"""
# Toggle request logging/profiling via gluon.main.appfactory (see below).
LOGGING = False
# Toggle web2py's "soft" cron, which piggybacks cron runs on requests.
SOFTCRON = False
import sys
import os
# Make the directory holding this handler the working directory and the
# first sys.path entry, so web2py's applications resolve relative to it.
path = os.path.dirname(os.path.abspath(__file__))
os.chdir(path)
sys.path = [path]+[p for p in sys.path if not p==path]
import gluon.main
import gluon.contrib.gateways.fcgi as fcgi
if LOGGING:
    # Wrap the base WSGI app so each request is logged to httpserver.log.
    application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase,
                                        logfilename='httpserver.log',
                                        profilerfilename=None)
else:
    application = gluon.main.wsgibase
if SOFTCRON:
    from gluon.settings import global_settings
    global_settings.web2py_crontype = 'soft'
| gpl-2.0 |
maartenq/ansible | test/units/modules/network/nso/test_nso_show.py | 12 | 4300 | #
# Copyright (c) 2017 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.nso import nso_show
from . import nso_module
from .nso_module import MockResponse
from units.modules.utils import set_module_args
class TestNsoShow(nso_module.TestNsoModule):
    """Unit tests for the nso_show Ansible module.

    Each test stubs the JSON-RPC transport (open_url) with a scripted
    sequence of MockResponse objects; nso_module.mock_call pops one
    response per call, so an empty ``calls`` list at the end proves the
    module issued exactly the expected requests.
    """
    module = nso_show

    @patch('ansible.module_utils.network.nso.nso.open_url')
    def test_nso_show_missing(self, open_url_mock):
        # A show_config on a non-existent path must fail with the NSO
        # invalid-params error surfaced in the module result.
        path = '/ncs:devices/device{ce0}/missing'
        calls = [
            MockResponse('login', {}, 200, '{}', {'set-cookie': 'id'}),
            MockResponse('get_system_setting', {'operation': 'version'}, 200, '{"result": "4.5"}'),
            MockResponse('new_trans', {'mode': 'read'}, 200, '{"result": {"th": 1}}'),
            MockResponse('show_config',
                         {'path': path, 'result_as': 'json'}, 200,
                         '{"error": {"data": {"param": "path"}, "type": "rpc.method.invalid_params"}}'),
            MockResponse('logout', {}, 200, '{"result": {}}'),
        ]
        open_url_mock.side_effect = lambda *args, **kwargs: nso_module.mock_call(calls, *args, **kwargs)

        set_module_args({
            'username': 'user', 'password': 'password',
            'url': 'http://localhost:8080/jsonrpc',
            'path': path
        })
        self.execute_module(failed=True, msg='NSO show_config invalid params. path = /ncs:devices/device{ce0}/missing')
        # All scripted responses must have been consumed.
        self.assertEqual(0, len(calls))

    @patch('ansible.module_utils.network.nso.nso.open_url')
    def test_nso_show_config(self, open_url_mock):
        # Happy path with operational data excluded.
        path = '/ncs:devices/device{ce0}'
        calls = [
            MockResponse('login', {}, 200, '{}', {'set-cookie': 'id'}),
            MockResponse('get_system_setting', {'operation': 'version'}, 200, '{"result": "4.5"}'),
            MockResponse('new_trans', {'mode': 'read'}, 200, '{"result": {"th": 1}}'),
            MockResponse('show_config', {'path': path, 'result_as': 'json'}, 200, '{"result": {"data": {}}}'),
            MockResponse('logout', {}, 200, '{"result": {}}'),
        ]
        open_url_mock.side_effect = lambda *args, **kwargs: nso_module.mock_call(calls, *args, **kwargs)

        set_module_args({
            'username': 'user', 'password': 'password',
            'url': 'http://localhost:8080/jsonrpc',
            'path': path,
            'operational': False
        })
        self.execute_module(changed=False, output={"data": {}})
        self.assertEqual(0, len(calls))

    @patch('ansible.module_utils.network.nso.nso.open_url')
    def test_nso_show_config_and_oper(self, open_url_mock):
        # Happy path with operational data included.
        path = '/ncs:devices/device{ce0}/sync-from'
        calls = [
            MockResponse('login', {}, 200, '{}', {'set-cookie': 'id'}),
            MockResponse('get_system_setting', {'operation': 'version'}, 200, '{"result": "4.5"}'),
            MockResponse('new_trans', {'mode': 'read'}, 200, '{"result": {"th": 1}}'),
            MockResponse('show_config', {'path': path, 'result_as': 'json'}, 200, '{"result": {"data": {}}}'),
            MockResponse('logout', {}, 200, '{"result": {}}'),
        ]
        open_url_mock.side_effect = lambda *args, **kwargs: nso_module.mock_call(calls, *args, **kwargs)

        set_module_args({
            'username': 'user', 'password': 'password',
            'url': 'http://localhost:8080/jsonrpc',
            'path': path,
            'operational': True
        })
        self.execute_module(changed=False, output={"data": {}})
        self.assertEqual(0, len(calls))
| gpl-3.0 |
bob60/DHT-sensors-python3 | examples/AdafruitDHT.py | 1 | 2171 | #!/usr/bin/python3
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import Adafruit_DHT

# Parse command line parameters: sensor model selector and GPIO pin number.
sensor_args = { '11': Adafruit_DHT.DHT11,
                '22': Adafruit_DHT.DHT22,
                '2302': Adafruit_DHT.AM2302 }
if len(sys.argv) == 3 and sys.argv[1] in sensor_args:
    sensor = sensor_args[sys.argv[1]]
    pin = sys.argv[2]
else:
    print('usage: sudo ./Adafruit_DHT.py [11|22|2302] GPIOpin#')
    print('example: sudo ./Adafruit_DHT.py 2302 4 - Read from an AM2302 connected to GPIO #4')
    sys.exit(1)

# Try to grab a sensor reading. Use the read_retry method which will retry up
# to 15 times to get a sensor reading (waiting 2 seconds between each retry).
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)

# Note that sometimes you won't get a reading and
# the results will be null (because Linux can't
# guarantee the timing of calls to read the sensor).
# If this happens try again!
if humidity is not None and temperature is not None:
    print('Temp={0:0.1f}*C Humidity={1:0.1f}%'.format(temperature, humidity))
else:
    print('Failed to get reading. Try again!')
| mit |
blaquee/crits | crits/core/class_mapper.py | 8 | 9178 | from bson.objectid import ObjectId
# Lookup-key attribute for each CRITs top-level object type, as returned
# by key_descriptor_from_obj_type() below.
__obj_type_to_key_descriptor__ = {
    'Actor': 'name',
    'Backdoor': 'id',
    'Campaign': 'name',
    'Certificate': 'md5',
    'Comment': 'object_id',
    'Domain': 'domain',
    'Email': 'id',
    'Event': 'id',
    'Exploit': 'id',
    'Indicator': 'id',
    'IP': 'ip',
    'PCAP': 'md5',
    'RawData': 'title',
    'Sample': 'md5',
    'Target': 'email_address',
}
def class_from_id(type_, _id):
    """
    Return an instantiated class object.

    :param type_: The CRITs top-level object type.
    :type type_: str
    :param _id: The ObjectId to search for.
    :type _id: str
    :returns: class which inherits from
              :class:`crits.core.crits_mongoengine.CritsBaseAttributes`,
              or None if the type is unknown or the id is invalid/not found.
    """

    # doing this to avoid circular imports
    from crits.actors.actor import ActorThreatIdentifier, Actor
    from crits.backdoors.backdoor import Backdoor
    from crits.campaigns.campaign import Campaign
    from crits.certificates.certificate import Certificate
    from crits.comments.comment import Comment
    from crits.core.source_access import SourceAccess
    from crits.core.user_role import UserRole
    from crits.domains.domain import Domain
    from crits.emails.email import Email
    from crits.events.event import Event
    from crits.exploits.exploit import Exploit
    from crits.indicators.indicator import Indicator, IndicatorAction
    from crits.ips.ip import IP
    from crits.pcaps.pcap import PCAP
    from crits.raw_data.raw_data import RawData, RawDataType
    from crits.samples.sample import Sample
    from crits.screenshots.screenshot import Screenshot
    from crits.targets.target import Target

    if not _id:
        return None

    # make sure it's a string
    _id = str(_id)

    # Use bson.ObjectId to make sure this is a valid ObjectId, otherwise
    # the queries below will raise a ValidationError exception.
    # NOTE(review): str.decode() only exists on Python 2; this module
    # appears to target Python 2 — confirm before porting.
    if not ObjectId.is_valid(_id.decode('utf8')):
        return None

    # Single dispatch table instead of a 21-branch if/elif chain; every
    # supported type is fetched by its ObjectId.
    type_map = {
        'Actor': Actor,
        'ActorThreatIdentifier': ActorThreatIdentifier,
        'Backdoor': Backdoor,
        'Campaign': Campaign,
        'Certificate': Certificate,
        'Comment': Comment,
        'Domain': Domain,
        'Email': Email,
        'Event': Event,
        'Exploit': Exploit,
        'Indicator': Indicator,
        'IndicatorAction': IndicatorAction,
        'IP': IP,
        'PCAP': PCAP,
        'RawData': RawData,
        'RawDataType': RawDataType,
        'Sample': Sample,
        'SourceAccess': SourceAccess,
        'Screenshot': Screenshot,
        'Target': Target,
        'UserRole': UserRole,
    }
    klass = type_map.get(type_)
    if klass is None:
        # Unknown type string: same behavior as the original chain.
        return None
    return klass.objects(id=_id).first()
def key_descriptor_from_obj_type(obj_type):
    """Return the lookup-key attribute name for a CRITs object type,
    or None for unknown types."""
    return __obj_type_to_key_descriptor__.get(obj_type)
def class_from_value(type_, value):
    """
    Return an instantiated class object.

    :param type_: The CRITs top-level object type.
    :type type_: str
    :param value: The value to search for (natural key: name, md5, ip,
                  domain, ObjectId string, ... depending on the type).
    :type value: str
    :returns: class which inherits from
              :class:`crits.core.crits_mongoengine.CritsBaseAttributes`,
              or None if the type is unknown or no match is found.
    """

    # doing this to avoid circular imports
    from crits.actors.actor import ActorThreatIdentifier, Actor
    from crits.backdoors.backdoor import Backdoor
    from crits.campaigns.campaign import Campaign
    from crits.certificates.certificate import Certificate
    from crits.comments.comment import Comment
    from crits.domains.domain import Domain
    from crits.emails.email import Email
    from crits.events.event import Event
    from crits.exploits.exploit import Exploit
    from crits.indicators.indicator import Indicator
    from crits.ips.ip import IP
    from crits.pcaps.pcap import PCAP
    from crits.raw_data.raw_data import RawData
    from crits.samples.sample import Sample
    from crits.screenshots.screenshot import Screenshot
    from crits.targets.target import Target

    # Make sure value is a string...
    value = str(value)

    # Use bson.ObjectId to make sure this is a valid ObjectId, otherwise
    # the queries below will raise a ValidationError exception.
    # NOTE(review): str.decode() only exists on Python 2.
    if (type_ in ['Backdoor', 'Comment', 'Email', 'Event', 'Exploit',
                  'Indicator', 'Screenshot'] and
            not ObjectId.is_valid(value.decode('utf8'))):
        return None

    if type_ == 'Target':
        # Special case: exact email match first, then fall back to a
        # case-insensitive match.
        target = Target.objects(email_address=value).first()
        if target:
            return target
        return Target.objects(email_address__iexact=value).first()

    # (class, query field) per type; replaces the long if/elif chain.
    lookup = {
        'Actor': (Actor, 'name'),
        'ActorThreatIdentifier': (ActorThreatIdentifier, 'name'),
        'Backdoor': (Backdoor, 'id'),
        'Campaign': (Campaign, 'name'),
        'Certificate': (Certificate, 'md5'),
        'Comment': (Comment, 'id'),
        'Domain': (Domain, 'domain'),
        'Email': (Email, 'id'),
        'Event': (Event, 'id'),
        'Exploit': (Exploit, 'id'),
        'Indicator': (Indicator, 'id'),
        'IP': (IP, 'ip'),
        'PCAP': (PCAP, 'md5'),
        'RawData': (RawData, 'md5'),
        'Sample': (Sample, 'md5'),
        'Screenshot': (Screenshot, 'id'),
    }
    try:
        klass, field = lookup[type_]
    except KeyError:
        # Unknown type string: same behavior as the original chain.
        return None
    return klass.objects(**{field: value}).first()
def class_from_type(type_):
    """
    Return a class object.

    :param type_: The CRITs top-level object type.
    :type type_: str
    :returns: class which inherits from
              :class:`crits.core.crits_mongoengine.CritsBaseAttributes`,
              or None for an unknown type.
    """

    # doing this to avoid circular imports
    from crits.actors.actor import ActorThreatIdentifier, Actor
    from crits.backdoors.backdoor import Backdoor
    from crits.campaigns.campaign import Campaign
    from crits.certificates.certificate import Certificate
    from crits.comments.comment import Comment
    from crits.core.source_access import SourceAccess
    from crits.core.user_role import UserRole
    from crits.domains.domain import Domain
    from crits.emails.email import Email
    from crits.events.event import Event
    from crits.exploits.exploit import Exploit
    from crits.indicators.indicator import Indicator, IndicatorAction
    from crits.ips.ip import IP
    from crits.pcaps.pcap import PCAP
    from crits.raw_data.raw_data import RawData, RawDataType
    from crits.samples.sample import Sample
    from crits.screenshots.screenshot import Screenshot
    from crits.targets.target import Target

    # Plain mapping instead of a 21-branch if/elif chain; .get() returns
    # None for unknown types, matching the original behavior.
    return {
        'Actor': Actor,
        'ActorThreatIdentifier': ActorThreatIdentifier,
        'Backdoor': Backdoor,
        'Campaign': Campaign,
        'Certificate': Certificate,
        'Comment': Comment,
        'Domain': Domain,
        'Email': Email,
        'Event': Event,
        'Exploit': Exploit,
        'Indicator': Indicator,
        'IndicatorAction': IndicatorAction,
        'IP': IP,
        'PCAP': PCAP,
        'RawData': RawData,
        'RawDataType': RawDataType,
        'Sample': Sample,
        'SourceAccess': SourceAccess,
        'Screenshot': Screenshot,
        'Target': Target,
        'UserRole': UserRole,
    }.get(type_)
| mit |
davidcusatis/horizon | openstack_dashboard/test/api_tests/cinder_tests.py | 4 | 17320 | # Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.test.utils import override_settings
import six
import cinderclient as cinder_client
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class CinderApiTests(test.APITestCase):
    """Mox-based unit tests for the Cinder API wrappers in
    openstack_dashboard.api.cinder: volume/snapshot listing, marker-based
    pagination, volume types, QoS associations, limits and pools.

    Each test records the expected cinderclient calls on stubs, replays
    them, then invokes the wrapper; mox verifies the exact call sequence.
    """

    def test_volume_list(self):
        search_opts = {'all_tenants': 1}
        detailed = True
        volumes = self.cinder_volumes.list()
        volume_transfers = self.cinder_volume_transfers.list()
        cinderclient = self.stub_cinderclient()
        cinderclient.volumes = self.mox.CreateMockAnything()
        cinderclient.volumes.list(search_opts=search_opts,).AndReturn(volumes)
        cinderclient.transfers = self.mox.CreateMockAnything()
        cinderclient.transfers.list(
            detailed=detailed,
            search_opts=search_opts,).AndReturn(volume_transfers)
        self.mox.ReplayAll()

        api_volumes = api.cinder.volume_list(self.request,
                                             search_opts=search_opts)
        self.assertEqual(len(volumes), len(api_volumes))

    def test_volume_list_paged(self):
        search_opts = {'all_tenants': 1}
        detailed = True
        volumes = self.cinder_volumes.list()
        volume_transfers = self.cinder_volume_transfers.list()
        cinderclient = self.stub_cinderclient()
        cinderclient.volumes = self.mox.CreateMockAnything()
        cinderclient.volumes.list(search_opts=search_opts,).AndReturn(volumes)
        cinderclient.transfers = self.mox.CreateMockAnything()
        cinderclient.transfers.list(
            detailed=detailed,
            search_opts=search_opts,).AndReturn(volume_transfers)
        self.mox.ReplayAll()

        # Without paginate=True there is never more/prev data.
        api_volumes, has_more, has_prev = api.cinder.volume_list_paged(
            self.request, search_opts=search_opts)
        self.assertEqual(len(volumes), len(api_volumes))
        self.assertFalse(has_more)
        self.assertFalse(has_prev)

    @override_settings(API_RESULT_PAGE_SIZE=2)
    @override_settings(OPENSTACK_API_VERSIONS={'volume': 2})
    def test_volume_list_paginate_first_page(self):
        # Reset the cached active version so the override takes effect.
        api.cinder.VERSIONS._active = None
        page_size = settings.API_RESULT_PAGE_SIZE
        volumes = self.cinder_volumes.list()
        volume_transfers = self.cinder_volume_transfers.list()
        search_opts = {'all_tenants': 1}
        # page_size + 1 items are requested so the API can detect that a
        # next page exists; the extra one is trimmed from the result.
        mox_volumes = volumes[:page_size + 1]
        expected_volumes = mox_volumes[:-1]
        cinderclient = self.stub_cinderclient()
        cinderclient.volumes = self.mox.CreateMockAnything()
        cinderclient.volumes.list(search_opts=search_opts, limit=page_size + 1,
                                  sort='created_at:desc', marker=None).\
            AndReturn(mox_volumes)
        cinderclient.transfers = self.mox.CreateMockAnything()
        cinderclient.transfers.list(
            detailed=True,
            search_opts=search_opts,).AndReturn(volume_transfers)
        self.mox.ReplayAll()

        api_volumes, more_data, prev_data = api.cinder.volume_list_paged(
            self.request, search_opts=search_opts, paginate=True)
        self.assertEqual(len(expected_volumes), len(api_volumes))
        self.assertTrue(more_data)
        self.assertFalse(prev_data)

    @override_settings(API_RESULT_PAGE_SIZE=2)
    @override_settings(OPENSTACK_API_VERSIONS={'volume': 2})
    def test_volume_list_paginate_second_page(self):
        api.cinder.VERSIONS._active = None
        page_size = settings.API_RESULT_PAGE_SIZE
        volumes = self.cinder_volumes.list()
        volume_transfers = self.cinder_volume_transfers.list()
        search_opts = {'all_tenants': 1}
        mox_volumes = volumes[page_size:page_size * 2 + 1]
        expected_volumes = mox_volumes[:-1]
        # The marker points at the first item of the requested page.
        marker = expected_volumes[0].id
        cinderclient = self.stub_cinderclient()
        cinderclient.volumes = self.mox.CreateMockAnything()
        cinderclient.volumes.list(search_opts=search_opts, limit=page_size + 1,
                                  sort='created_at:desc', marker=marker).\
            AndReturn(mox_volumes)
        cinderclient.transfers = self.mox.CreateMockAnything()
        cinderclient.transfers.list(
            detailed=True,
            search_opts=search_opts,).AndReturn(volume_transfers)
        self.mox.ReplayAll()

        api_volumes, more_data, prev_data = api.cinder.volume_list_paged(
            self.request, search_opts=search_opts, marker=marker,
            paginate=True)
        self.assertEqual(len(expected_volumes), len(api_volumes))
        self.assertTrue(more_data)
        self.assertTrue(prev_data)

    @override_settings(API_RESULT_PAGE_SIZE=2)
    @override_settings(OPENSTACK_API_VERSIONS={'volume': 2})
    def test_volume_list_paginate_last_page(self):
        api.cinder.VERSIONS._active = None
        page_size = settings.API_RESULT_PAGE_SIZE
        volumes = self.cinder_volumes.list()
        volume_transfers = self.cinder_volume_transfers.list()
        search_opts = {'all_tenants': 1}
        # Exactly page_size items come back: no next page.
        mox_volumes = volumes[-1 * page_size:]
        expected_volumes = mox_volumes
        marker = expected_volumes[0].id
        cinderclient = self.stub_cinderclient()
        cinderclient.volumes = self.mox.CreateMockAnything()
        cinderclient.volumes.list(search_opts=search_opts, limit=page_size + 1,
                                  sort='created_at:desc', marker=marker).\
            AndReturn(mox_volumes)
        cinderclient.transfers = self.mox.CreateMockAnything()
        cinderclient.transfers.list(
            detailed=True,
            search_opts=search_opts,).AndReturn(volume_transfers)
        self.mox.ReplayAll()

        api_volumes, more_data, prev_data = api.cinder.volume_list_paged(
            self.request, search_opts=search_opts, marker=marker,
            paginate=True)
        self.assertEqual(len(expected_volumes), len(api_volumes))
        self.assertFalse(more_data)
        self.assertTrue(prev_data)

    @override_settings(API_RESULT_PAGE_SIZE=2)
    @override_settings(OPENSTACK_API_VERSIONS={'volume': 2})
    def test_volume_list_paginate_back_from_some_page(self):
        api.cinder.VERSIONS._active = None
        page_size = settings.API_RESULT_PAGE_SIZE
        volumes = self.cinder_volumes.list()
        volume_transfers = self.cinder_volume_transfers.list()
        search_opts = {'all_tenants': 1}
        mox_volumes = volumes[page_size:page_size * 2 + 1]
        expected_volumes = mox_volumes[:-1]
        marker = expected_volumes[0].id
        cinderclient = self.stub_cinderclient()
        cinderclient.volumes = self.mox.CreateMockAnything()
        # Backwards navigation flips the sort direction to ascending.
        cinderclient.volumes.list(search_opts=search_opts, limit=page_size + 1,
                                  sort='created_at:asc', marker=marker).\
            AndReturn(mox_volumes)
        cinderclient.transfers = self.mox.CreateMockAnything()
        cinderclient.transfers.list(
            detailed=True,
            search_opts=search_opts,).AndReturn(volume_transfers)
        self.mox.ReplayAll()

        api_volumes, more_data, prev_data = api.cinder.volume_list_paged(
            self.request, search_opts=search_opts, sort_dir="asc",
            marker=marker, paginate=True)
        self.assertEqual(len(expected_volumes), len(api_volumes))
        self.assertTrue(more_data)
        self.assertTrue(prev_data)

    @override_settings(API_RESULT_PAGE_SIZE=2)
    @override_settings(OPENSTACK_API_VERSIONS={'volume': 2})
    def test_volume_list_paginate_back_to_first_page(self):
        api.cinder.VERSIONS._active = None
        page_size = settings.API_RESULT_PAGE_SIZE
        volumes = self.cinder_volumes.list()
        volume_transfers = self.cinder_volume_transfers.list()
        search_opts = {'all_tenants': 1}
        mox_volumes = volumes[:page_size]
        expected_volumes = mox_volumes
        marker = expected_volumes[0].id
        cinderclient = self.stub_cinderclient()
        cinderclient.volumes = self.mox.CreateMockAnything()
        cinderclient.volumes.list(search_opts=search_opts, limit=page_size + 1,
                                  sort='created_at:asc', marker=marker).\
            AndReturn(mox_volumes)
        cinderclient.transfers = self.mox.CreateMockAnything()
        cinderclient.transfers.list(
            detailed=True,
            search_opts=search_opts,).AndReturn(volume_transfers)
        self.mox.ReplayAll()

        api_volumes, more_data, prev_data = api.cinder.volume_list_paged(
            self.request, search_opts=search_opts, sort_dir="asc",
            marker=marker, paginate=True)
        self.assertEqual(len(expected_volumes), len(api_volumes))
        self.assertTrue(more_data)
        self.assertFalse(prev_data)

    def test_volume_snapshot_list(self):
        search_opts = {'all_tenants': 1}
        volume_snapshots = self.cinder_volume_snapshots.list()
        cinderclient = self.stub_cinderclient()
        cinderclient.volume_snapshots = self.mox.CreateMockAnything()
        cinderclient.volume_snapshots.list(search_opts=search_opts).\
            AndReturn(volume_snapshots)
        self.mox.ReplayAll()

        api.cinder.volume_snapshot_list(self.request, search_opts=search_opts)

    def test_volume_snapshot_list_no_volume_configured(self):
        # remove volume from service catalog
        catalog = self.service_catalog
        for service in catalog:
            if service["type"] == "volume":
                self.service_catalog.remove(service)
        search_opts = {'all_tenants': 1}
        volume_snapshots = self.cinder_volume_snapshots.list()
        cinderclient = self.stub_cinderclient()
        cinderclient.volume_snapshots = self.mox.CreateMockAnything()
        cinderclient.volume_snapshots.list(search_opts=search_opts).\
            AndReturn(volume_snapshots)
        self.mox.ReplayAll()

        api.cinder.volume_snapshot_list(self.request, search_opts=search_opts)

    def test_volume_type_list_with_qos_associations(self):
        volume_types = self.cinder_volume_types.list()
        # Due to test data limitations, we can only run this test using
        # one qos spec, which is associated with one volume type.
        # If we use multiple qos specs, the test data will always
        # return the same associated volume type, which is invalid
        # and prevented by the UI.
        qos_specs_full = self.cinder_qos_specs.list()
        qos_specs_only_one = [qos_specs_full[0]]
        associations = self.cinder_qos_spec_associations.list()
        cinderclient = self.stub_cinderclient()
        cinderclient.volume_types = self.mox.CreateMockAnything()
        cinderclient.volume_types.list().AndReturn(volume_types)
        cinderclient.qos_specs = self.mox.CreateMockAnything()
        cinderclient.qos_specs.list().AndReturn(qos_specs_only_one)
        cinderclient.qos_specs.get_associations = self.mox.CreateMockAnything()
        cinderclient.qos_specs.get_associations(qos_specs_only_one[0].id).\
            AndReturn(associations)
        self.mox.ReplayAll()

        assoc_vol_types = \
            api.cinder.volume_type_list_with_qos_associations(self.request)
        associate_spec = assoc_vol_types[0].associated_qos_spec
        # NOTE(review): assertTrue() treats its second argument as a
        # failure message, so only truthiness is checked here;
        # assertEqual was probably intended.
        self.assertTrue(associate_spec, qos_specs_only_one[0].name)

    def test_volume_type_get_with_qos_association(self):
        volume_type = self.cinder_volume_types.first()
        qos_specs_full = self.cinder_qos_specs.list()
        qos_specs_only_one = [qos_specs_full[0]]
        associations = self.cinder_qos_spec_associations.list()
        cinderclient = self.stub_cinderclient()
        cinderclient.volume_types = self.mox.CreateMockAnything()
        cinderclient.volume_types.get(volume_type.id).AndReturn(volume_type)
        cinderclient.qos_specs = self.mox.CreateMockAnything()
        cinderclient.qos_specs.list().AndReturn(qos_specs_only_one)
        cinderclient.qos_specs.get_associations = self.mox.CreateMockAnything()
        cinderclient.qos_specs.get_associations(qos_specs_only_one[0].id).\
            AndReturn(associations)
        self.mox.ReplayAll()

        assoc_vol_type = \
            api.cinder.volume_type_get_with_qos_association(self.request,
                                                            volume_type.id)
        associate_spec = assoc_vol_type.associated_qos_spec
        # NOTE(review): same assertTrue(a, b) misuse as above.
        self.assertTrue(associate_spec, qos_specs_only_one[0].name)

    def test_absolute_limits_with_negative_values(self):
        # Negative quota means "unlimited"; negative usage is clamped to 0.
        values = {"maxTotalVolumes": -1, "totalVolumesUsed": -1}
        expected_results = {"maxTotalVolumes": float("inf"),
                            "totalVolumesUsed": 0}
        limits = self.mox.CreateMockAnything()
        limits.absolute = []
        for key, val in six.iteritems(values):
            limit = self.mox.CreateMockAnything()
            limit.name = key
            limit.value = val
            limits.absolute.append(limit)
        cinderclient = self.stub_cinderclient()
        cinderclient.limits = self.mox.CreateMockAnything()
        cinderclient.limits.get().AndReturn(limits)
        self.mox.ReplayAll()

        ret_val = api.cinder.tenant_absolute_limits(self.request)
        for key in expected_results.keys():
            self.assertEqual(expected_results[key], ret_val[key])

    def test_pool_list(self):
        pools = self.cinder_pools.list()
        cinderclient = self.stub_cinderclient()
        cinderclient.pools = self.mox.CreateMockAnything()
        cinderclient.pools.list(detailed=True).AndReturn(pools)
        self.mox.ReplayAll()

        # No assertions are necessary. Verification is handled by mox.
        api.cinder.pool_list(self.request, detailed=True)

    def test_volume_type_default(self):
        volume_type = self.cinder_volume_types.first()
        cinderclient = self.stub_cinderclient()
        cinderclient.volume_types = self.mox.CreateMockAnything()
        cinderclient.volume_types.default().AndReturn(volume_type)
        self.mox.ReplayAll()

        default_volume_type = api.cinder.volume_type_default(self.request)
        self.assertEqual(default_volume_type, volume_type)
class CinderApiVersionTests(test.TestCase):
    """Tests for Cinder API version selection and the v1/v2 attribute
    adaptation helpers (name/description vs display_name/display_description).
    """

    def setUp(self):
        super(CinderApiVersionTests, self).setUp()
        # The version is set when the module is loaded. Reset the
        # active version each time so that we can test with different
        # versions.
        api.cinder.VERSIONS._active = None

    def test_default_client_is_v2(self):
        client = api.cinder.cinderclient(self.request)
        self.assertIsInstance(client, cinder_client.v2.client.Client)

    @override_settings(OPENSTACK_API_VERSIONS={'volume': 2})
    def test_v2_setting_returns_v2_client(self):
        client = api.cinder.cinderclient(self.request)
        self.assertIsInstance(client, cinder_client.v2.client.Client)

    def test_get_v2_volume_attributes(self):
        # Get a v2 volume: it exposes 'name', not the v1 'display_name'.
        volume = self.cinder_volumes.get(name="v2_volume")
        self.assertTrue(hasattr(volume._apiresource, 'name'))
        self.assertFalse(hasattr(volume._apiresource, 'display_name'))
        name = "A v2 test volume name"
        description = "A v2 volume description"
        setattr(volume._apiresource, 'name', name)
        setattr(volume._apiresource, 'description', description)
        self.assertEqual(name, volume.name)
        self.assertEqual(description, volume.description)

    def test_get_v2_snapshot_attributes(self):
        # Get a v2 snapshot
        snapshot = self.cinder_volume_snapshots.get(
            description="v2 volume snapshot description")
        self.assertFalse(hasattr(snapshot._apiresource, 'display_name'))
        name = "A v2 test snapshot name"
        description = "A v2 snapshot description"
        setattr(snapshot._apiresource, 'name', name)
        setattr(snapshot._apiresource, 'description', description)
        self.assertEqual(name, snapshot.name)
        self.assertEqual(description, snapshot.description)

    def test_get_id_for_nameless_volume(self):
        # A volume with an empty display name falls back to its id.
        volume = self.cinder_volumes.first()
        setattr(volume._apiresource, 'display_name', "")
        self.assertEqual(volume.id, volume.name)

    def test_adapt_dictionary_to_v2(self):
        # v1-style keys must be translated to their v2 equivalents.
        volume = self.cinder_volumes.first()
        data = {'name': volume.name,
                'description': volume.description,
                'size': volume.size}
        ret_data = api.cinder._replace_v2_parameters(data)
        self.assertIn('name', ret_data.keys())
        self.assertIn('description', ret_data.keys())
        self.assertNotIn('display_name', ret_data.keys())
        self.assertNotIn('display_description', ret_data.keys())
| apache-2.0 |
tempbottle/kbengine | kbe/res/scripts/common/Lib/multiprocessing/context.py | 98 | 10669 | import os
import sys
import threading
from . import process
__all__ = [] # things are copied from here to __init__.py
#
# Exceptions
#
class ProcessError(Exception):
    """Base class for all multiprocessing-specific exceptions."""
    pass
class BufferTooShort(ProcessError):
    """Raised when a supplied buffer is too small for the received message
    (e.g. by Connection.recv_bytes_into())."""
    pass
class TimeoutError(ProcessError):
    """Raised by methods that accept a timeout when the timeout expires."""
    pass
class AuthenticationError(ProcessError):
    """Raised when there is an authentication error during the connection
    handshake."""
    pass
#
# Base type for contexts
#
class BaseContext(object):
    """Base class for process contexts.

    A context exposes the whole multiprocessing API (Lock, Queue, Pool,
    shared ctypes, ...) bound to a particular start method; factory
    methods pass ``ctx=self.get_context()`` down to the created objects.
    """
    # Exceptions re-exported so they are reachable as attributes of a
    # context object just like on the package itself.
    ProcessError = ProcessError
    BufferTooShort = BufferTooShort
    TimeoutError = TimeoutError
    AuthenticationError = AuthenticationError
    # Delegated straight to the process module.
    current_process = staticmethod(process.current_process)
    active_children = staticmethod(process.active_children)
    def cpu_count(self):
        '''Returns the number of CPUs in the system'''
        num = os.cpu_count()
        if num is None:
            raise NotImplementedError('cannot determine number of cpus')
        else:
            return num
    def Manager(self):
        '''Returns a manager associated with a running server process.
        The manager's methods such as `Lock()`, `Condition()` and `Queue()`
        can be used to create shared objects.
        '''
        from .managers import SyncManager
        m = SyncManager(ctx=self.get_context())
        m.start()
        return m
    def Pipe(self, duplex=True):
        '''Returns two connection objects connected by a pipe'''
        from .connection import Pipe
        return Pipe(duplex)
    def Lock(self):
        '''Returns a non-recursive lock object'''
        from .synchronize import Lock
        return Lock(ctx=self.get_context())
    def RLock(self):
        '''Returns a recursive lock object'''
        from .synchronize import RLock
        return RLock(ctx=self.get_context())
    def Condition(self, lock=None):
        '''Returns a condition object'''
        from .synchronize import Condition
        return Condition(lock, ctx=self.get_context())
    def Semaphore(self, value=1):
        '''Returns a semaphore object'''
        from .synchronize import Semaphore
        return Semaphore(value, ctx=self.get_context())
    def BoundedSemaphore(self, value=1):
        '''Returns a bounded semaphore object'''
        from .synchronize import BoundedSemaphore
        return BoundedSemaphore(value, ctx=self.get_context())
    def Event(self):
        '''Returns an event object'''
        from .synchronize import Event
        return Event(ctx=self.get_context())
    def Barrier(self, parties, action=None, timeout=None):
        '''Returns a barrier object'''
        from .synchronize import Barrier
        return Barrier(parties, action, timeout, ctx=self.get_context())
    def Queue(self, maxsize=0):
        '''Returns a queue object'''
        from .queues import Queue
        return Queue(maxsize, ctx=self.get_context())
    def JoinableQueue(self, maxsize=0):
        '''Returns a joinable queue object'''
        from .queues import JoinableQueue
        return JoinableQueue(maxsize, ctx=self.get_context())
    def SimpleQueue(self):
        '''Returns a simple queue object'''
        from .queues import SimpleQueue
        return SimpleQueue(ctx=self.get_context())
    def Pool(self, processes=None, initializer=None, initargs=(),
             maxtasksperchild=None):
        '''Returns a process pool object'''
        from .pool import Pool
        return Pool(processes, initializer, initargs, maxtasksperchild,
                    context=self.get_context())
    def RawValue(self, typecode_or_type, *args):
        '''Returns a shared object (without a synchronizing wrapper)'''
        from .sharedctypes import RawValue
        return RawValue(typecode_or_type, *args)
    def RawArray(self, typecode_or_type, size_or_initializer):
        '''Returns a shared array (without a synchronizing wrapper)'''
        from .sharedctypes import RawArray
        return RawArray(typecode_or_type, size_or_initializer)
    def Value(self, typecode_or_type, *args, lock=True):
        '''Returns a synchronized shared object'''
        from .sharedctypes import Value
        return Value(typecode_or_type, *args, lock=lock,
                     ctx=self.get_context())
    def Array(self, typecode_or_type, size_or_initializer, *, lock=True):
        '''Returns a synchronized shared array'''
        from .sharedctypes import Array
        return Array(typecode_or_type, size_or_initializer, lock=lock,
                     ctx=self.get_context())
    def freeze_support(self):
        '''Check whether this is a fake forked process in a frozen executable.
        If so then run code specified by commandline and exit.
        '''
        if sys.platform == 'win32' and getattr(sys, 'frozen', False):
            from .spawn import freeze_support
            freeze_support()
    def get_logger(self):
        '''Return package logger -- if it does not already exist then
        it is created.
        '''
        from .util import get_logger
        return get_logger()
    def log_to_stderr(self, level=None):
        '''Turn on logging and add a handler which prints to stderr'''
        from .util import log_to_stderr
        return log_to_stderr(level)
    def allow_connection_pickling(self):
        '''Install support for sending connections and sockets
        between processes
        '''
        # This is undocumented. In previous versions of multiprocessing
        # its only effect was to make socket objects inheritable on Windows.
        from . import connection
    def set_executable(self, executable):
        '''Sets the path to a python.exe or pythonw.exe binary used to run
        child processes instead of sys.executable when using the 'spawn'
        start method. Useful for people embedding Python.
        '''
        from .spawn import set_executable
        set_executable(executable)
    def set_forkserver_preload(self, module_names):
        '''Set list of module names to try to load in forkserver process.
        This is really just a hint.
        '''
        from .forkserver import set_forkserver_preload
        set_forkserver_preload(module_names)
    def get_context(self, method=None):
        '''Return a context for the given start method.

        With ``method=None`` a concrete context returns itself; otherwise
        the matching concrete context is looked up and checked for
        availability on this platform.
        '''
        if method is None:
            return self
        try:
            ctx = _concrete_contexts[method]
        except KeyError:
            raise ValueError('cannot find context for %r' % method)
        ctx._check_available()
        return ctx
    def get_start_method(self, allow_none=False):
        # A concrete context's start method is fixed; allow_none only
        # matters for DefaultContext, which overrides this.
        return self._name
    def set_start_method(self, method=None):
        # Only the DefaultContext supports changing the start method.
        raise ValueError('cannot set start method of concrete context')
    def _check_available(self):
        # Overridden by contexts that are not available on every
        # platform (e.g. forkserver).
        pass
#
# Type of default context -- underlying context can be set at most once
#
class Process(process.BaseProcess):
    """Process type used by the default context.

    Delegates actual process creation to whichever concrete context is
    currently selected as the default start method.
    """
    _start_method = None
    @staticmethod
    def _Popen(process_obj):
        return _default_context.get_context().Process._Popen(process_obj)
class DefaultContext(BaseContext):
    """Context whose underlying concrete context can be set (at most once,
    unless forced) via set_start_method()."""
    Process = Process
    def __init__(self, context):
        # Fallback concrete context used until a start method is chosen.
        self._default_context = context
        # Concrete context actually in use; None until first needed/set.
        self._actual_context = None
    def get_context(self, method=None):
        if method is None:
            # Lazily commit to the platform default on first use.
            if self._actual_context is None:
                self._actual_context = self._default_context
            return self._actual_context
        else:
            return super().get_context(method)
    def set_start_method(self, method, force=False):
        if self._actual_context is not None and not force:
            raise RuntimeError('context has already been set')
        if method is None and force:
            # Forcing with no method resets back to the lazy default.
            self._actual_context = None
            return
        self._actual_context = self.get_context(method)
    def get_start_method(self, allow_none=False):
        if self._actual_context is None:
            if allow_none:
                return None
            self._actual_context = self._default_context
        return self._actual_context._name
    def get_all_start_methods(self):
        # 'forkserver' requires fd-passing support in the reduction module.
        if sys.platform == 'win32':
            return ['spawn']
        else:
            from . import reduction
            if reduction.HAVE_SEND_HANDLE:
                return ['fork', 'spawn', 'forkserver']
            else:
                return ['fork', 'spawn']
# Public API of the default context mirrors its non-underscore attributes.
DefaultContext.__all__ = list(x for x in dir(DefaultContext) if x[0] != '_')
#
# Context types for fixed start method
#
if sys.platform != 'win32':
    # POSIX: three start methods are (potentially) available.
    class ForkProcess(process.BaseProcess):
        _start_method = 'fork'
        @staticmethod
        def _Popen(process_obj):
            from .popen_fork import Popen
            return Popen(process_obj)
    class SpawnProcess(process.BaseProcess):
        _start_method = 'spawn'
        @staticmethod
        def _Popen(process_obj):
            from .popen_spawn_posix import Popen
            return Popen(process_obj)
    class ForkServerProcess(process.BaseProcess):
        _start_method = 'forkserver'
        @staticmethod
        def _Popen(process_obj):
            from .popen_forkserver import Popen
            return Popen(process_obj)
    class ForkContext(BaseContext):
        _name = 'fork'
        Process = ForkProcess
    class SpawnContext(BaseContext):
        _name = 'spawn'
        Process = SpawnProcess
    class ForkServerContext(BaseContext):
        _name = 'forkserver'
        Process = ForkServerProcess
        def _check_available(self):
            # forkserver needs the ability to send fds between processes.
            from . import reduction
            if not reduction.HAVE_SEND_HANDLE:
                raise ValueError('forkserver start method not available')
    _concrete_contexts = {
        'fork': ForkContext(),
        'spawn': SpawnContext(),
        'forkserver': ForkServerContext(),
    }
    # 'fork' is the default start method on POSIX.
    _default_context = DefaultContext(_concrete_contexts['fork'])
else:
    # Windows: only 'spawn' is supported.
    class SpawnProcess(process.BaseProcess):
        _start_method = 'spawn'
        @staticmethod
        def _Popen(process_obj):
            from .popen_spawn_win32 import Popen
            return Popen(process_obj)
    class SpawnContext(BaseContext):
        _name = 'spawn'
        Process = SpawnProcess
    _concrete_contexts = {
        'spawn': SpawnContext(),
    }
    _default_context = DefaultContext(_concrete_contexts['spawn'])
#
# Force the start method
#
def _force_start_method(method):
    # Internal helper: override the default context's concrete context
    # unconditionally, bypassing set_start_method()'s already-set check.
    _default_context._actual_context = _concrete_contexts[method]
#
# Check that the current thread is spawning a child process
#
_tls = threading.local()
def get_spawning_popen():
    # Return the Popen object the current thread is spawning with, or
    # None if this thread is not currently spawning a child process.
    return getattr(_tls, 'spawning_popen', None)
def set_spawning_popen(popen):
    # Record (thread-locally) the Popen object being used to spawn a
    # child process; pass None to clear it.
    _tls.spawning_popen = popen
def assert_spawning(obj):
    # Raise if obj is being pickled outside of child-process spawning;
    # such objects are only meant to be inherited by child processes.
    if get_spawning_popen() is None:
        raise RuntimeError(
            '%s objects should only be shared between processes'
            ' through inheritance' % type(obj).__name__
            )
| lgpl-3.0 |
liikGit/MissionPlanner | Lib/site-packages/numpy/NumpyDotNet/benchmark.py | 54 | 7011 |
import sys
import time
from numpy import *
UsingIronPython = False
if sys.subversion[0] == 'IronPython':
import System
UsingIronPython = True
#import numbers
#from random import random
class Complex(object):
    """Minimal complex-number value type used to benchmark object arrays.

    Stores real and imaginary parts privately and supports equality,
    addition, subtraction and multiplication with other Complex values.
    """
    def __init__(self, r, i):
        # Real and imaginary components (name-mangled, read-only by convention).
        self.__r = r
        self.__i = i
    def __eq__(self, other):
        # Returning NotImplemented (instead of raising AttributeError as
        # before) lets Python fall back to its default comparison when
        # compared with a non-Complex object.
        if not isinstance(other, Complex):
            return NotImplemented
        return (self.__r == other.__r) and (self.__i == other.__i)
    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
    def __add__(self, other):
        if not isinstance(other, Complex):
            return NotImplemented
        return Complex(self.__r + other.__r, self.__i + other.__i)
    def __sub__(self, other):
        if not isinstance(other, Complex):
            return NotImplemented
        return Complex(self.__r - other.__r, self.__i - other.__i)
    def __mul__(self, other):
        # (a+bi)(c+di) = (ac - bd) + (ad + bc)i
        if not isinstance(other, Complex):
            return NotImplemented
        return Complex(self.__r * other.__r - self.__i * other.__i,
                       self.__r * other.__i + self.__i * other.__r)
    def __str__(self):
        return "(%f, %f)" % (self.__r, self.__i)
sizes = (10, 100, 1000, 10000, 100000, 1000000)
print "sizes,", ",".join([str(s) for s in sizes])
def random():
    # Resulting distribution isn't very random, could be better.
    # Deterministic stub shadowing the usual random() -- presumably kept
    # constant so benchmark runs are reproducible; verify before changing.
    return 0.25
def creationTest(iters):
    # Time allocation + fill of a fresh ndarray, `iters` times per size.
    # NOTE(review): reads global `sizes` and writes global `times`
    # (both defined at module level).
    for size in sizes:
        t0 = time.clock()
        a = 0
        for i in xrange(iters):
            a = ndarray(size)
            a.flat = i
        t1 = time.clock()
        times[size] = t1-t0
    print "creation,", ",".join([str(t) for (s, t) in sorted(times.items())])
def viewCreationTest(iters):
    # Time creation of slice views (no data copy) into one base array.
    for size in sizes:
        t0 = time.clock()
        a = ndarray(size)
        for i in xrange(iters):
            b = a[1:-1]
            #b.Dispose()
        t1 = time.clock()
        times[size] = t1-t0
    print "view creation,", ",".join([str(t) for (s, t) in sorted(times.items())])
def basicData():
    # Build three parallel lists of arrays (one per benchmark size):
    # arrays filled with 10, arrays filled with 20, and uninitialized
    # result buffers for the arithmetic tests.
    tens = []
    twenties = []
    results = []
    for size in sizes:
        a = ndarray(size)
        a[:] = 10
        if a[0] != 10 or a[size-1] != 10:
            print "Error: 'tens' array not initialized correctly (%s, %s)." % (a[0], a[size-1])
        tens.append(a)
        a = ndarray(size)
        a[:] = 20
        twenties.append(a)
        a = ndarray(size)
        results.append(a)
    return tens, twenties, results
def multiplyTest(iters):
tens, twenties, results = basicData()
for i, size in enumerate(sizes):
a = tens[i]
b = twenties[i]
c = results[i]
t0 = time.clock()
for j in xrange(longIter):
multiply(a, b, c)
t1 = time.clock()
times[size] = t1-t0
if c[0] != 200:
print "Error: multiply produced incorrect value for c[0] (%s, expected 200)." % c[0]
if c[size-1] != 200:
print "Error: multiply produced incorect value for c[-1] (%s, expected 200)." % c[size-1]
print "multiply,", ",".join([str(t) for (s, t) in sorted(times.items())])
def addTest(iters):
    # Time element-by-element addition in a pure Python loop (deliberately
    # not vectorized, to measure per-element indexing overhead).
    tens, twenties, results = basicData()
    for i, size in enumerate(sizes):
        a = tens[i]
        b = twenties[i]
        c = results[i]
        t0 = time.clock()
        for j in xrange(iters):
            for k in xrange(size):
                c[k] = a[k] + b[k]
        t1 = time.clock();
        times[size] = t1-t0
        # Sanity check: 10 + 20 == 30 at both ends of the result.
        if c[0] != 30 or c[size-1] != 30:
            print "Error: add produced incorrect values for c[0], c[-1]: %s, %s expected 30, 30" % (c[0], c[size-1])
    print "add,", ",".join([str(t) for (s, t) in sorted(times.items())])
def derivativeTest(iters):
    # Derivative test
    # Time a finite-difference derivative: subtract shifted views of a
    # walk array, then divide by the (unit) time step.
    for size in sizes:
        a = ndarray(size)
        a[0] = 1.0
        for j in range(1, size):
            a[j] = a[j-1] + random() - 0.5 # Simple random walk
        dt = 1.0
        # Time the derivative calc.
        tmp = ndarray(size-1)
        t0 = time.clock()
        dx = ndarray(size-1)
        for j in range(iters):
            try:
                subtract(a[1:], a[:-1], tmp)
                #dx = divide(tmp, dt, dx)
                dx = tmp / dt
            except Exception as e:
                print "j = %s, tmp = %d\ndt = %s" % (j, len(tmp), dt)
                raise e
        t1 = time.clock()
        times[size] = t1-t0
        # Drop references so the arrays can be reclaimed before the next size.
        a = 0
        dx = 0
    print "derivative," , ",".join([str(t) for (s, t) in sorted(times.items())])
def convolutionTest(iters):
    # Convolution.
    # Time a (partial) direct convolution of a long signal `a` with a
    # short kernel `b`, using slice views and vectorized multiply.
    # NOTE(review): only windows of exactly 9 elements are processed and
    # only tmp[0] is stored -- this benchmarks slicing cost, not a full
    # convolution result; confirm intent before "fixing".
    for i in range(1, len(sizes)):
        size = sizes[i]
        size2 = sizes[0]
        a = array(size)
        b = array(size2)
        a[0] = 1.0
        b[0] = 1.0
        for j in range(1, size):
            a[j] = 1.0 #a[i] + random() - 0.5 # Simple random walk
        for j in range(1, size2):
            b[j] = 1.0 #b[j] + random() - 0.5
        P, Q, N = len(a), len(b), len(a)+len(b)-1
        r1 = range(iters)
        r2 = range(N-1)
        t0 = time.clock()
        z = ndarray(N, 1)
        #aa = a[0:9]
        #bb = b[0:9]
        tmp = ndarray(9, 1)
        for j in r1:
            for k in r2:
                # Overlap window of the two sequences at output index k.
                lower, upper = max(0, k-(Q-1)), min(P-1, k)
                if lower <> upper and upper-lower == 9:
                    aa = a[lower:upper]
                    bb = b[k-upper:k-lower]
                    #tmp = aa * bb
                    multiply(aa, bb, tmp)
                    z[k] = tmp[0]
                    #z[k] = (a[lower:upper] * b[k-upper:k-lower])[0]
                    #tmp.Dispose()
                    #bb.Dispose()
                    #aa.Dispose()
        t1 = time.clock()
        times[size] = t1-t0
    print "convolution,", ",".join([str(t) for (s, t) in sorted(times.items())])
def objectArrayTest(iters):
    # Test again with object types.
    # Time vectorized subtract over arrays of Complex objects (dtype
    # argument 0 -- presumably the object dtype in this numpy .NET port;
    # verify). Uses `with` so each array is disposed deterministically.
    for i, size in enumerate(sizes[:-1]):
        with ndarray(size, 0) as a:
            with ndarray(size, 0) as b:
                with ndarray(size, 0) as c:
                    bValue = Complex(5.2, -3.2)
                    for p in xrange(size):
                        a[p] = Complex(14.2*i, 1.2*i+5.0)
                        b[p] = bValue
                    t0 = time.clock()
                    for j in xrange(iters):
                        subtract(a, b, c)
                    t1 = time.clock()
                    times[size] = t1-t0
    print "object subtract,", ",".join([str(t) for (s, t) in sorted(times.items())])
def collect():
    # Force a full .NET garbage collection between benchmarks (IronPython
    # only) so allocation garbage from one test cannot skew the next.
    if UsingIronPython:
        t0 = time.clock()
        System.GC.Collect()
        System.GC.WaitForPendingFinalizers()
        t1 = time.clock()
        print "Garbage collection time: %s" % (t1-t0)
# Driver: run the benchmark suite three times (warm-up + repeats),
# resetting the shared `times` dict and forcing GC between tests.
longIter = 100
for k in range(3):
    times = {}
    #time.sleep(10);
    #print "Starting creation test."
    creationTest(longIter)
    collect()
    viewCreationTest(10000)
    collect()
    #print "Multiply test"
    multiplyTest(longIter)
    collect()
    #print "Add test"
    addTest(5)
    collect()
    #print "Derivative test"
    derivativeTest(2000)
    collect()
    #convolutionTest(5)
    collect()
    # Object-array test currently disabled (`if 0`).
    if 0 and UsingIronPython:
        objectArrayTest(20)
        collect()
| gpl-3.0 |
aliguori/qemu-next | scripts/tracetool/backend/dtrace.py | 71 | 2135 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
DTrace/SystemTAP backend.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@linux.vnet.ibm.com"
from tracetool import out
PROBEPREFIX = None
def _probeprefix():
    """Return the configured SystemTap probe prefix.

    Raises ValueError if the module-level PROBEPREFIX has not been set.
    """
    if PROBEPREFIX is not None:
        return PROBEPREFIX
    raise ValueError("you must set PROBEPREFIX")
BINARY = None
def _binary():
    """Return the configured path of the traced binary.

    Raises ValueError if the module-level BINARY has not been set.
    """
    if BINARY is not None:
        return BINARY
    raise ValueError("you must set BINARY")
def c(events):
    """Generate the C source part: nothing is needed for this backend."""
    pass
def h(events):
    """Generate the header: one inline trace_*() wrapper per event that
    forwards its arguments to the generated QEMU_* DTrace macro."""
    out('#include "trace-dtrace.h"',
        '')
    for e in events:
        out('static inline void trace_%(name)s(%(args)s) {',
            '    QEMU_%(uppername)s(%(argnames)s);',
            '}',
            name = e.name,
            args = e.args,
            uppername = e.name.upper(),
            argnames = ", ".join(e.args.names()),
            )
def d(events):
    """Generate the DTrace provider definition (.d file) declaring one
    probe per event."""
    out('provider qemu {')
    for e in events:
        args = str(e.args)
        # DTrace provider syntax expects foo() for empty
        # params, not foo(void)
        if args == 'void':
            args = ''
        # Define prototype for probe arguments
        out('',
            'probe %(name)s(%(args)s);',
            name = e.name,
            args = args,
            )
    out('',
        '};')
def stap(events):
    """Generate the SystemTap tapset (.stp): one probe alias per event,
    binding each probe argument ($argN) to a named local variable."""
    for e in events:
        # Define prototype for probe arguments
        out('probe %(probeprefix)s.%(name)s = process("%(binary)s").mark("%(name)s")',
            '{',
            probeprefix = _probeprefix(),
            name = e.name,
            binary = _binary(),
            )
        i = 1
        if len(e.args) > 0:
            for name in e.args.names():
                # Append underscore to reserved keywords
                if name in ('limit', 'in', 'next', 'self'):
                    name += '_'
                out('  %s = $arg%d;' % (name, i))
                i += 1
        out('}')
        out()
| gpl-2.0 |
Bysmyyr/blink-crosswalk | LayoutTests/http/tests/fetch/PRESUBMIT.py | 39 | 1307 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Chromium presubmit script for fetch API layout tests.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts.
'''
import os
import os.path
import re
'''
def missing_files(scripts_path, path_list):
for script in os.listdir(scripts_path):
if script.startswith('.') or not script.endswith('.js'):
continue
basename = re.sub(r'\.js$', '.html', os.path.basename(script))
for path in [os.path.join(path, basename) for path in path_list]:
if not os.path.exists(path):
yield path
def CheckChangeOnUpload(input, output):
contexts = ['window', 'workers', 'serviceworker']
top_path = input.PresubmitLocalPath()
script_tests_path = os.path.join(top_path, 'script-tests')
test_paths = [os.path.join(top_path, context) for context in contexts]
return [output.PresubmitPromptWarning('%s is missing' % path) for path
in missing_files(script_tests_path, test_paths)]
'''
# Because generate.py has been quite updated, this PRESUBMIT.py is obsolete
# and temporarily disabled.
def CheckChangeOnUpload(input, output):
    """No-op presubmit hook: the original checks are disabled (see the
    commented-out implementation above) so no problems are reported."""
    return list()
| bsd-3-clause |
akhmadMizkat/odoo | addons/utm/models/utm.py | 2 | 3323 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
from odoo.http import request
class UtmMedium(models.Model):
    """Delivery channel of a marketing link (UTM medium)."""
    # OLD crm.case.channel
    _name = 'utm.medium'
    _description = 'Channels'
    _order = 'name'
    name = fields.Char(string='Channel Name', required=True)
    active = fields.Boolean(default=True)
class UtmCampaign(models.Model):
    """Marketing campaign a link belongs to (UTM campaign)."""
    # OLD crm.case.resource.type
    _name = 'utm.campaign'
    _description = 'Campaign'
    name = fields.Char(string='Campaign Name', required=True, translate=True)
class UtmSource(models.Model):
    """Origin of a marketing link (UTM source)."""
    _name = 'utm.source'
    _description = 'Source'
    name = fields.Char(string='Source Name', required=True, translate=True)
class UtmMixin(models.AbstractModel):
    """Mixin class for objects which can be tracked by marketing.

    Records inheriting this mixin get campaign/source/medium fields whose
    defaults are read back from the UTM cookies set during HTTP dispatch.
    """
    _name = 'utm.mixin'
    campaign_id = fields.Many2one('utm.campaign', 'Campaign',
                                  help="This is a name that helps you keep track of your different campaign efforts Ex: Fall_Drive, Christmas_Special")
    source_id = fields.Many2one('utm.source', 'Source',
                                help="This is the source of the link Ex:Search Engine, another domain,or name of email list")
    medium_id = fields.Many2one('utm.medium', 'Medium',
                                help="This is the method of delivery.Ex: Postcard, Email, or Banner Ad", oldname='channel_id')
    def tracking_fields(self):
        """Return the (url parameter, mixin field, cookie name) triples used
        to map UTM url parameters onto the mixin's fields."""
        # This function cannot be overridden in a model which inherit utm.mixin
        # Limitation by the heritage on AbstractModel
        # record_crm_lead.tracking_fields() will call tracking_fields() from module utm.mixin (if not overridden on crm.lead)
        # instead of the overridden method from utm.mixin.
        # To force the call of overridden method, we use self.pool['utm.mixin'].tracking_fields() which respects overridden
        # methods of utm.mixin, but will ignore overridden method on crm.lead
        return [
            # ("URL_PARAMETER", "FIELD_NAME_MIXIN", "NAME_IN_COOKIES")
            ('utm_campaign', 'campaign_id', 'odoo_utm_campaign'),
            ('utm_source', 'source_id', 'odoo_utm_source'),
            ('utm_medium', 'medium_id', 'odoo_utm_medium'),
        ]
    @api.model
    def default_get(self, fields):
        """Default the UTM fields from the tracking cookies of the current
        HTTP request, creating the matching records on the fly."""
        values = super(UtmMixin, self).default_get(fields)
        for url_param, field_name, cookie_name in self.env['utm.mixin'].tracking_fields():
            if field_name in fields:
                field = self._fields[field_name]
                value = False
                if request:
                    # ir_http dispatch saves the url params in a cookie
                    value = request.httprequest.cookies.get(cookie_name)
                # if we receive a string for a many2one, we search/create the id
                # NOTE(review): `basestring` makes this Python 2 only.
                if field.type == 'many2one' and isinstance(value, basestring) and value:
                    Model = self.env[field.comodel_name]
                    records = Model.search([('name', '=', value)], limit=1)
                    if not records:
                        records = Model.create({'name': value})
                    value = records.id
                values[field_name] = value
        return values
| gpl-3.0 |
vicnet/weboob | modules/avendrealouer/browser.py | 2 | 3460 | # -*- coding: utf-8 -*-
# Copyright(C) 2017 ZeHiro
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from weboob.browser import PagesBrowser, URL
from weboob.capabilities.housing import HOUSE_TYPES
from .pages import CitiesPage, SearchPage, HousingPage
from .constants import QUERY_TYPES, QUERY_HOUSE_TYPES
class AvendrealouerBrowser(PagesBrowser):
    """Browser for avendrealouer.fr: city lookup, housing search and
    single-housing retrieval."""
    BASEURL = 'https://www.avendrealouer.fr'
    cities = URL(r'/common/api/localities\?term=(?P<term>)', CitiesPage)
    search = URL(r'/recherche.html\?pageIndex=1&sortPropertyName=Price&sortDirection=Ascending&searchTypeID=(?P<type_id>.*)&typeGroupCategoryID=1&transactionId=1&localityIds=(?P<location_ids>.*)&typeGroupIds=(?P<type_group_ids>.*)(?P<rooms>.*)(?P<min_price>.*)(?P<max_price>.*)(?P<min_surface>.*)(?P<max_surface>.*)', SearchPage)
    search_one = URL(r'/recherche.html\?localityIds=4-36388&reference=(?P<reference>.*)&hasMoreCriterias=true&searchTypeID=1', SearchPage)
    # NOTE(review): `[vente|location]` is a character CLASS, not an
    # alternation -- it matches any single char of "vente|locaion".
    # Probably meant r'/(vente|location).*'; confirm before changing.
    housing = URL(r'/[vente|location].*', HousingPage)
    def get_cities(self, pattern):
        """Return an iterator of cities whose name matches `pattern`."""
        return self.cities.open(term=pattern).iter_cities()
    def search_housings(self, query):
        """Run a housing search built from a weboob HousingQuery and
        return an iterator of Housing results."""
        type_id = QUERY_TYPES[query.type]
        house_types = []
        for house_type in query.house_types:
            # UNKNOWN maps to the full list of type groups; otherwise
            # accumulate the requested groups one by one.
            if house_type == HOUSE_TYPES.UNKNOWN:
                house_types = QUERY_HOUSE_TYPES[house_type]
                break
            house_types.append(QUERY_HOUSE_TYPES[house_type])
        type_group_ids = ','.join(house_types)
        location_ids = ','.join([city.id for city in query.cities])
        def build_optional_param(query_field, query_string):
            # Emit "&<param>=<value>" only when the query attribute is set.
            replace = ''
            if getattr(query, query_field):
                replace = '&%s=%s' % (query_string, getattr(query, query_field))
            return replace
        rooms = ''
        if query.nb_rooms:
            # Expand "at least N rooms" into the site's comma list "N,N+1,...,5".
            rooms = str(query.nb_rooms)
            for i in range(query.nb_rooms + 1, 6):
                rooms += ',%s' % str(i)
            rooms = '&roomComfortIds=%s' % rooms
        reg_exp = {
            'type_id': type_id,
            'type_group_ids': type_group_ids,
            'location_ids': location_ids,
            'rooms': rooms,
            'min_price': build_optional_param('cost_min', 'minimumPrice'),
            'max_price': build_optional_param('cost_max', 'maximumPrice'),
            'min_surface': build_optional_param('area_min', 'minimumSurface'),
            'max_surface': build_optional_param('area_max', 'maximumSurface')
        }
        return self.search.open(**reg_exp).iter_housings()
    def get_housing(self, housing_id, obj=None):
        """Fetch one housing by its site reference, optionally filling an
        existing Housing object `obj`."""
        url = self.search_one.open(reference=housing_id).get_housing_url()
        return self.open(url).page.get_housing(obj=obj)
| lgpl-3.0 |
as110/as110.github.io | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/lexers/dalvik.py | 364 | 3442 | # -*- coding: utf-8 -*-
"""
pygments.lexers.dalvik
~~~~~~~~~~~~~~~~~~~~~~
Pygments lexers for Dalvik VM-related languages.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Keyword, Text, Comment, Name, String, Number, \
Punctuation
__all__ = ['SmaliLexer']
class SmaliLexer(RegexLexer):
    """
    For `Smali <http://code.google.com/p/smali/>`_ (Android/Dalvik) assembly
    code.
    *New in Pygments 1.6.*
    """
    name = 'Smali'
    aliases = ['smali']
    filenames = ['*.smali']
    mimetypes = ['text/smali']
    # Order in 'root' matters: earlier includes win on overlapping input
    # (e.g. 'field' must be tried before bare 'instruction' text).
    tokens = {
        'root': [
            include('comment'),
            include('label'),
            include('field'),
            include('method'),
            include('class'),
            include('directive'),
            include('access-modifier'),
            include('instruction'),
            include('literal'),
            include('punctuation'),
            include('type'),
            include('whitespace')
        ],
        'directive': [
            (r'^[ \t]*\.(class|super|implements|field|subannotation|annotation|'
             r'enum|method|registers|locals|array-data|packed-switch|'
             r'sparse-switch|catchall|catch|line|parameter|local|prologue|'
             r'epilogue|source)', Keyword),
            (r'^[ \t]*\.end (field|subannotation|annotation|method|array-data|'
             'packed-switch|sparse-switch|parameter|local)', Keyword),
            (r'^[ \t]*\.restart local', Keyword),
        ],
        'access-modifier': [
            (r'(public|private|protected|static|final|synchronized|bridge|'
             r'varargs|native|abstract|strictfp|synthetic|constructor|'
             r'declared-synchronized|interface|enum|annotation|volatile|'
             r'transient)', Keyword),
        ],
        'whitespace': [
            (r'\n', Text),
            (r'\s+', Text),
        ],
        'instruction': [
            (r'\b[vp]\d+\b', Name.Builtin), # registers
            (r'\b[a-z][A-Za-z0-9/-]+\s+', Text), # instructions
        ],
        'literal': [
            (r'".*"', String),
            (r'0x[0-9A-Fa-f]+t?', Number.Hex),
            (r'[0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'[0-9]+L?', Number.Integer),
        ],
        'field': [
            (r'(\$?\b)([A-Za-z0-9_$]*)(:)',
             bygroups(Punctuation, Name.Variable, Punctuation)),
        ],
        'method': [
            (r'<(?:cl)?init>', Name.Function), # constructor
            (r'(\$?\b)([A-Za-z0-9_$]*)(\()',
             bygroups(Punctuation, Name.Function, Punctuation)),
        ],
        'label': [
            (r':[A-Za-z0-9_]+', Name.Label),
        ],
        'class': [
            # class names in the form Lcom/namespace/ClassName;
            # I only want to color the ClassName part, so the namespace part is
            # treated as 'Text'
            (r'(L)((?:[A-Za-z0-9_$]+/)*)([A-Za-z0-9_$]+)(;)',
             bygroups(Keyword.Type, Text, Name.Class, Text)),
        ],
        'punctuation': [
            (r'->', Punctuation),
            (r'[{},\(\):=\.-]', Punctuation),
        ],
        'type': [
            (r'[ZBSCIJFDV\[]+', Keyword.Type),
        ],
        'comment': [
            (r'#.*?\n', Comment),
        ],
    }
| mit |
dimkarakostas/rupture | backend/breach/backtracking_analyzer.py | 2 | 5758 | import operator
import collections
import logging
logger = logging.getLogger(__name__)
def get_accumulated_probabilities(sorted_candidate_lengths, current_round_acc_probability, compression_function_factor, amplification_factor):
    '''Score each candidate alphabet by its accumulated compressed length.

    Each candidate is weighted with
    ``compression_function_factor ** -(length - min_length)``, so shorter
    accumulated lengths (better compression) get exponentially larger
    weight. The weights are normalized into relative probabilities, then
    multiplied by the parent round's accumulated probability and the
    amplification factor.

    Arguments:
    sorted_candidate_lengths -- list of dicts with keys
        'candidate_alphabet' and 'length', sorted by ascending 'length'.
    current_round_acc_probability -- accumulated probability of the
        parent Round.
    compression_function_factor -- base of the exponential weighting.
    amplification_factor -- multiplier applied to each final probability.

    Returns a list (not a dictionary, as a previous docstring claimed) of
    dicts with keys 'candidate' and 'probability'.
    '''
    min_candidate_value = sorted_candidate_lengths[0]['length']
    # Compute each candidate's unnormalized weight exactly once (the old
    # code recomputed the same exponential in a second loop).
    weights = [
        compression_function_factor ** (
            -abs(candidate['length'] - min_candidate_value)
        )
        for candidate in sorted_candidate_lengths
    ]
    relative_probability_sum = sum(weights)
    accumulated_probabilities = []
    for candidate, weight in zip(sorted_candidate_lengths, weights):
        relative_prob = weight / relative_probability_sum
        accumulated_probabilities.append({
            'candidate': candidate['candidate_alphabet'],
            'probability': (
                amplification_factor *
                current_round_acc_probability *
                relative_prob
            )
        })
    return accumulated_probabilities
def get_candidates(candidate_lengths, accumulated_prob, compression_function_factor, amplification_factor):
    '''Take a dictionary mapping candidate alphabets to their lists of
    sampled data lengths, aggregate and sort them, and score them.

    Returns a list with each candidate and its accumulated probability.
    '''
    assert(len(candidate_lengths) > 1)
    # Collapse each candidate's sampled lengths into a single total.
    # NOTE(review): `iteritems()` makes this Python 2 only.
    accumulated_candidate_lengths = []
    for candidate_alphabet, list_of_lengths in candidate_lengths.iteritems():
        accumulated_candidate_lengths.append({
            'candidate_alphabet': candidate_alphabet,
            'length': sum(list_of_lengths)
        })
    # Sort sampleset groups by length.
    sorted_candidate_lengths = sorted(
        accumulated_candidate_lengths,
        key=operator.itemgetter('length')
    )
    candidates_probabilities = get_accumulated_probabilities(sorted_candidate_lengths, accumulated_prob, compression_function_factor, amplification_factor)
    # Log a human-readable scoreboard of candidates, best (shortest) first.
    logger.info(75 * '#')
    logger.info('Candidate scoreboard:')
    for cand in sorted_candidate_lengths:
        logger.info('\t{}: {}'.format(cand['candidate_alphabet'], cand['length']))
    logger.info(75 * '#')
    return candidates_probabilities
def decide_next_backtracking_world_state(samplesets, accumulated_prob):
    '''Take a list of samplesets and the accumulated probability of the current
    round and extract a decision for a state transition with a certain
    probability for each candidate.
    Arguments:
    samplesets -- a list of samplesets.
    accumulated_prob -- the accumulated probability of the current knownalphabet.
    This list must contain at least two elements so that we have some basis
    for comparison. Each of the list's elements must share the same world state
    (knownsecret and knownalphabet) so that we are comparing on the same basis.
    The samplesets must contain at least two different candidate alphabets so
    that a decision can be made. It can contain multiple samplesets collected
    over the same candidate alphabet.
    Returns a list of dictionaries, one per candidate, each holding the
    candidate's new world state (knownsecret, knownalphabet) and the
    probability with which the analyzer suggests that transition.
    '''
    # Ensure we have enough sample sets to compare.
    assert(len(samplesets) > 1)
    # Ensure all samplesets are extending the same known state
    knownsecret = samplesets[0].round.knownsecret
    round = samplesets[0].round
    victim = round.victim
    target = victim.target
    for sampleset in samplesets:
        assert(sampleset.round == round)
    # Split samplesets based on alphabetvector under consideration
    # and collect data lengths for each candidate.
    candidate_lengths = collections.defaultdict(lambda: [])
    candidate_count_samplesets = collections.defaultdict(lambda: 0)
    for sampleset in samplesets:
        candidate_lengths[sampleset.candidatealphabet].append(sampleset.datalength)
        candidate_count_samplesets[sampleset.candidatealphabet] += 1
    # Every candidate must have been sampled the same number of times,
    # otherwise the accumulated lengths are not comparable.
    # NOTE(review): indexing .items() like this is Python 2 only.
    candidate_count_samplesets = candidate_count_samplesets.items()
    samplesets_per_candidate = candidate_count_samplesets[0][1]
    for alphabet, count in candidate_count_samplesets:
        assert(count == samplesets_per_candidate)
    # Ensure we have a decision to make
    assert(len(candidate_lengths) > 1)
    compression_function_factor = samplesets[0].round.victim.target.compression_function_factor
    amplification_factor = samplesets[0].round.victim.target.amplification_factor
    candidates = get_candidates(candidate_lengths, accumulated_prob, compression_function_factor, amplification_factor)
    state = []
    # All candidates are returned in order to create new rounds.
    for i in candidates:
        state.append({
            'knownsecret': knownsecret + i['candidate'],
            'probability': i['probability'],
            'knownalphabet': target.alphabet
        })
    return state
| mit |
cuilishen/cuilishenMissionPlanner | Lib/site-packages/scipy/cluster/tests/test_vq.py | 51 | 6554 | #!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
# David Cournapeau
# Last Change: Wed Nov 05 07:00 PM 2008 J
import os.path
import warnings
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal, \
TestCase, run_module_suite
from scipy.cluster.vq import kmeans, kmeans2, py_vq, py_vq2, vq, ClusterError
try:
from scipy.cluster import _vq
TESTC=True
except ImportError:
print "== Error while importing _vq, not testing C imp of vq =="
TESTC=False
#Optional:
# import modules that are located in the same directory as this file.
DATAFILE1 = os.path.join(os.path.dirname(__file__), "data.txt")
# Global data
X = np.array([[3.0, 3], [4, 3], [4, 2],
[9, 2], [5, 1], [6, 2], [9, 4],
[5, 2], [5, 4], [7, 4], [6, 5]])
CODET1 = np.array([[3.0000, 3.0000],
[6.2000, 4.0000],
[5.8000, 1.8000]])
CODET2 = np.array([[11.0/3, 8.0/3],
[6.7500, 4.2500],
[6.2500, 1.7500]])
LABEL1 = np.array([0, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1])
class TestVq(TestCase):
    """Tests for vector quantization against the precomputed LABEL1
    expected assignments (module-level X/LABEL1 fixtures)."""
    def test_py_vq(self):
        initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
        code = initc.copy()
        label1 = py_vq(X, initc)[0]
        assert_array_equal(label1, LABEL1)
    def test_py_vq2(self):
        initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
        code = initc.copy()
        label1 = py_vq2(X, initc)[0]
        assert_array_equal(label1, LABEL1)
    def test_vq(self):
        # Compare the C implementation (when importable) against LABEL1.
        initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
        code = initc.copy()
        if TESTC:
            label1, dist = _vq.vq(X, initc)
            assert_array_equal(label1, LABEL1)
            tlabel1, tdist = vq(X, initc)
        else:
            print "== not testing C imp of vq =="
    #def test_py_vq_1d(self):
    # """Test special rank 1 vq algo, python implementation."""
    # data = X[:, 0]
    # initc = data[:3]
    # code = initc.copy()
    # a, b = _py_vq_1d(data, initc)
    # ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis])
    # assert_array_equal(a, ta)
    # assert_array_equal(b, tb)
    def test_vq_1d(self):
        """Test special rank 1 vq algo, python implementation."""
        # C rank-1 result must match the generic python path on the
        # same data reshaped to rank 2.
        data = X[:, 0]
        initc = data[:3]
        code = initc.copy()
        if TESTC:
            a, b = _vq.vq(data, initc)
            ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis])
            assert_array_equal(a, ta)
            assert_array_equal(b, tb)
        else:
            print "== not testing C imp of vq (rank 1) =="
class TestKMean(TestCase):
    """Tests for the kmeans / kmeans2 clustering entry points."""

    def test_large_features(self):
        # Generate a data set with large values, and run kmeans on it to
        # (regression for 1077).
        d = 300
        n = 1e2
        # NOTE(review): ``n`` is a float here; newer numpy versions reject
        # float dimensions in randn -- confirm against the supported numpy.
        m1 = np.random.randn(d)
        m2 = np.random.randn(d)
        x = 10000 * np.random.randn(n, d) - 20000 * m1
        y = 10000 * np.random.randn(n, d) + 20000 * m2

        data = np.empty((x.shape[0] + y.shape[0], d), np.double)
        data[:x.shape[0]] = x
        data[x.shape[0]:] = y

        res = kmeans(data, 2)

    def test_kmeans_simple(self):
        # One refinement step from the initial code book must reach CODET2.
        initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
        code = initc.copy()
        code1 = kmeans(X, code, iter = 1)[0]
        assert_array_almost_equal(code1, CODET2)

    def test_kmeans_lost_cluster(self):
        """This will cause kmean to have a cluster with no points."""
        data = np.fromfile(open(DATAFILE1), sep = ", ")
        data = data.reshape((200, 2))
        initk = np.array([[-1.8127404, -0.67128041],
                          [ 2.04621601, 0.07401111],
                          [-2.31149087,-0.05160469]])

        res = kmeans(data, initk)

        # missing='warn' emits a UserWarning for the empty cluster;
        # silence it for the duration of the call only.
        warnings.simplefilter('ignore', UserWarning)
        try:
            res = kmeans2(data, initk, missing = 'warn')
        finally:
            warnings.simplefilter('default', UserWarning)

        # missing='raise' must raise ClusterError instead of warning.
        try :
            res = kmeans2(data, initk, missing = 'raise')
            raise AssertionError("Exception not raised ! Should not happen")
        except ClusterError, e:
            pass

    def test_kmeans2_simple(self):
        """Testing simple call to kmeans2 and its results."""
        initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
        code = initc.copy()
        code1 = kmeans2(X, code, iter = 1)[0]
        code2 = kmeans2(X, code, iter = 2)[0]

        assert_array_almost_equal(code1, CODET1)
        assert_array_almost_equal(code2, CODET2)

    def test_kmeans2_rank1(self):
        """Testing simple call to kmeans2 with rank 1 data."""
        data = np.fromfile(open(DATAFILE1), sep = ", ")
        data = data.reshape((200, 2))
        data1 = data[:, 0]
        data2 = data[:, 1]

        initc = data1[:3]
        code = initc.copy()
        # Smoke test only: no assertions, just exercise the rank-1 path.
        code1 = kmeans2(data1, code, iter = 1)[0]
        code2 = kmeans2(data1, code, iter = 2)[0]

    def test_kmeans2_rank1_2(self):
        """Testing simple call to kmeans2 with rank 1 data."""
        data = np.fromfile(open(DATAFILE1), sep = ", ")
        data = data.reshape((200, 2))
        data1 = data[:, 0]

        code1 = kmeans2(data1, 2, iter = 1)

    def test_kmeans2_init(self):
        """Testing that kmeans2 init methods work."""
        data = np.fromfile(open(DATAFILE1), sep = ", ")
        data = data.reshape((200, 2))

        kmeans2(data, 3, minit = 'random')
        kmeans2(data, 3, minit = 'points')

        # Check special case 1d
        data = data[:, :1]
        kmeans2(data, 3, minit = 'random')
        kmeans2(data, 3, minit = 'points')

    def test_kmeans2_empty(self):
        """Ticket #505."""
        # Empty input must raise ValueError rather than crash.
        try:
            kmeans2([], 2)
            raise AssertionError("This should not succeed.")
        except ValueError, e:
            # OK, that's what we expect
            pass

    def test_kmeans_0k(self):
        """Regression test for #546: fail when k arg is 0."""
        try:
            kmeans(X, 0)
            raise AssertionError("kmeans with 0 clusters should fail.")
        except ValueError:
            pass

        try:
            kmeans2(X, 0)
            raise AssertionError("kmeans2 with 0 clusters should fail.")
        except ValueError:
            pass

        try:
            kmeans2(X, np.array([]))
            raise AssertionError("kmeans2 with 0 clusters should fail.")
        except ValueError:
            pass
# Allow this test module to be executed directly.
if __name__ == "__main__":
    run_module_suite()
| gpl-3.0 |
maxive/erp | setup/odoo-wsgi.example.py | 36 | 1723 | # WSGI Handler sample configuration file.
#
# Change the appropriate settings below, in order to provide the parameters
# that would normally be passed in the command-line.
# (at least conf['addons_path'])
#
# For generic wsgi handlers a global application is defined.
# For uwsgi this should work:
# $ uwsgi_python --http :9090 --pythonpath . --wsgi-file openerp-wsgi.py
#
# For gunicorn additional globals need to be defined in the Gunicorn section.
# Then the following command should run:
# $ gunicorn odoo:service.wsgi_server.application -c openerp-wsgi.py
import odoo

#----------------------------------------------------------
# Common
#----------------------------------------------------------
# Advertise multi-process mode (matches how uwsgi/gunicorn run workers).
odoo.multi_process = True # Nah!

# Equivalent of --load command-line option
odoo.conf.server_wide_modules = ['web']

# Shortcut to the global configuration object.
conf = odoo.tools.config

# Path to the OpenERP Addons repository (comma-separated for
# multiple locations)
conf['addons_path'] = '../../addons/trunk,../../web/trunk/addons'

# Optional database config if not using local socket
#conf['db_name'] = 'mycompany'
#conf['db_host'] = 'localhost'
#conf['db_user'] = 'foo'
#conf['db_port'] = 5432
#conf['db_password'] = 'secret'

#----------------------------------------------------------
# Generic WSGI handlers application
#----------------------------------------------------------
# WSGI entry point picked up by generic servers (uwsgi, mod_wsgi, ...).
application = odoo.service.wsgi_server.application

odoo.service.server.load_server_wide_modules()

#----------------------------------------------------------
# Gunicorn
#----------------------------------------------------------
# Standard OpenERP XML-RPC port is 8069
bind = '127.0.0.1:8069'
pidfile = '.gunicorn.pid'
workers = 4
timeout = 240
max_requests = 2000
| agpl-3.0 |
pmajka/3dbar | bin/parsers/paxinos_watson_rbisc/svgfix.py | 2 | 19761 | #!/usr/bin/python
# -*- coding: utf-8 -*-
###############################################################################
# #
# This file is part of 3d Brain Atlas Reconstructor #
# #
# Copyright (C) 2010-2012 Piotr Majka, Jakub M. Kowalski #
# #
# 3d Brain Atlas Reconstructor is free software: you can redistribute #
# it and/or modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation, either version 3 of #
# the License, or (at your option) any later version. #
# #
# 3d Brain Atlas Reconstructor is distributed in the hope that it #
# will be useful, but WITHOUT ANY WARRANTY; without even the implied #
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. #
# See the GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with 3d Brain Atlas Reconstructor. If not, see #
# http://www.gnu.org/licenses/. #
# #
###############################################################################
"""
Module or simpylying input SVG file structure. In many converters SVG files are created with
preserving groups of objects. It results in generating lot of C{g} objets.
Each of nested C{g} may have own transformation matrix. What is more, most nested object
(text, path, line, etc. may have its own transformation matrix defined). Nesting all those
transformation causes lot of confusion and dissalows extracting filnal coordinates of objects
in a direct way.
This modlule applies all nested transfomarions and leaves objects with their final coordinates
allowing further modules direct extraction of coordinates and dimensions.
@note: Only absolute coordinates (capital letters in segmants names) are parsed properly.
G{importgraph}
Currently only translate and scale transformations are supprted.
re_trd - transformations dictionary:
- translate: translate(number,number)
- scalexy: scale(number,number)
- scale: scale(number)
Usage exapmle:
>>> import svgfix
>>> _dev_fix_img(dom_svg_object)
@see: http://www.w3.org/TR/SVG/coords.html#NestedTransformations
G{importgraph}
"""
import re
import xml.dom.minidom as dom
import numpy as np
import sys
from string import *
from config import *
import time
from svgpathparse import parsePath, UnparsePath
def __getTransformMatrix(tr):
    """
    @type tr: string
    @param tr: Transformation string extracted from the 'transform'
        attribute of an SVG element.
    @return: NumPy transformation matrix equivalent to the provided
        transformation string.

    How the function works:

    1. A transformation string is a series of translate or scale commands,
       e.g. 'translate(20,20) scale(.3)'; each command is parsed separately
       and the matrices are multiplied to obtain the final matrix.
    2. If an element does not have any transformation, the identity matrix
       is returned.

    Examples:

    >>> import atlasparser
    >>> atlasparser.svgfix.__getTransformMatrix("")
    array([[ 1.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  1.]])

    >>> atlasparser.svgfix.__getTransformMatrix("translate(.2,3) scale(.4)")
    array([[ 0.4,  0. ,  0.2],
           [ 0. ,  0.4,  3. ],
           [ 0. ,  0. ,  1. ]])

    @note: Subsequent transformations should be separated by one or more
        whitespace chars.
    @see: Details of transforming SVG objects: http://www.w3.org/TR/SVG/coords.html
    """
    # TODO: Make this function more general (ie. empty string exceptions handling...)
    # TODO implement more general splitting (re.split??)

    # Split transformation string by space.
    # A 'matrix' transformation is handled separately: it is kept as one
    # token so its own internal separators are not split apart.
    if re.search(re_trd['matrix'],tr):
        transformations_list=[strip(tr)]
    else:
        transformations_list=split(strip(tr)," ")

    # Define initial transform matrix which is identity matrix:
    ctm=np.eye(3) #ctm - current transformation matrix

    # Iterate over transformations and define transformation matrix
    # for each elementary transformation.
    # TODO: It should be written in less time-consuming way.(string.beginswith??)
    for transformation in transformations_list:
        for tr_type in re_trd.keys():
            values=re_trd[tr_type].search(transformation)
            if values:
                # Build the matrix for the current elementary transformation
                # (ntm - new transformation matrix).
                ntm=__defineTransformationMatrix(tr_type,values)
                # Compose it into the accumulated transformation.
                ctm=np.dot(ctm,ntm)
                continue

    # If no more transformations left, return current transformation matrix
    return ctm
def __defineTransformationMatrix(tr_type,v):
    """
    Build the 3x3 homogeneous matrix for one elementary transformation.

    @type tr_type: string
    @param tr_type: Type of transformation, one of the re_trd dictionary keys.
    @type v: regular expression match object
    @param v: Match holding the numeric values extracted from the
        transformation string (translation amounts, scale factors,
        matrix entries, ...).
    @return: Transformation matrix for the given elementary transformation.
    """
    # Pull every captured value out of the match as a float.
    vals = [float(group) for group in v.groups()]

    if tr_type == 'translate':
        # Translation by (tx, ty).
        return np.array([[1., 0., vals[0]],
                         [0., 1., vals[1]],
                         [0., 0., 1.]])
    if tr_type == 'scalexy':
        # Independent scaling along the x and y axes.
        return np.array([[vals[0], 0., 0.],
                         [0., vals[1], 0.],
                         [0., 0., 1.]])
    if tr_type == 'scalex':
        # Uniform scaling: the single factor applies to both axes.
        return np.array([[vals[0], 0., 0.],
                         [0., vals[0], 0.],
                         [0., 0., 1.]])
    if tr_type == 'matrix':
        # Full 'matrix(a,b,c,d,e,f)' form: columns (a,b), (c,d), (e,f).
        return np.array([[vals[0], vals[2], vals[4]],
                         [vals[1], vals[3], vals[5]],
                         [0., 0., 1.]])

    # Unknown transformation type - fall back to the identity matrix.
    return np.eye(3)
def __fixElement(el,gm):
    """
    @type el: DOM object
    @param el: element to be fixed.
    @type gm: NumPy array
    @param gm: Transformation matrix of the parent element
        (gm - global transformation matrix)
    @return: nothing - only element C{el} is modified.

    Converts coordinates in the given element to absolute values depending
    on the element type.

    Steps:
        1. Check if the given element has any transformation defined.
           If yes, compute this element's matrix and compose it with the
           parent matrix.
        2. Correct coordinates of the element depending on its type.
        3. Perform element-dependent correction (scale font size, etc.).
    """
    # Check if given element has "transform" attribute
    if el.hasAttribute('transform'):
        # Get transformation matrix for given element
        # (tm - transformation matrix of given element)
        tm=__getTransformMatrix(el.attributes['transform'].value)
        # Remove transformation string - we do not need it anymore
        el.removeAttribute('transform')
        # Define current transformation matrix.
        cm=np.dot(gm,tm)
    else:
        # If there is no transformation string in the given element, the
        # current transformation matrix is simply the parent's matrix (gm).
        cm=gm

    # Fix element depending on its type (dispatch on tag name):
    if el.nodeName=='text':
        __fixText(el,cm)
    if el.nodeName=='line':
        __fixLine(el,cm)
    if el.nodeName=='polygon':
        __fixPolygon(el,cm)
    if el.nodeName=='polyline':
        __fixPolyline(el,cm)
    if el.nodeName=='path':
        __fixPath(el,cm)
def __fixLine(el,cm):
    """
    Transform both endpoints of a line element in place.

    @type el: DOM object
    @param el: Line SVG element to be modified.
    @type cm: NumPy array
    @param cm: Transformation matrix to be applied.
    @return: nothing - only element C{el} is modified.

    Assumes the element carries correctly defined x1,y1,x2,y2 attributes;
    an unhandled exception here means the element is defined incorrectly.
    """
    # Map each (x, y) endpoint through the matrix and store it back as text.
    for x_attr, y_attr in (('x1', 'y1'), ('x2', 'y2')):
        moved = __transformPoint(
            [el.attributes[x_attr].value, el.attributes[y_attr].value], cm)
        el.attributes[x_attr].value = str(moved[0])
        el.attributes[y_attr].value = str(moved[1])
def __fixText(el,cm):
    """
    @type el: DOM object
    @param el: Text SVG element to be modified.
    @type cm: NumPy array
    @param cm: Transformation matrix to be applied.
    @return: nothing - only element C{el} is modified.

    Transforms a text element of the SVG file to final coordinates. Initial
    coordinates are optional. The function also tries to scale the font
    size; if the final font size would be less than 1px it is forced to 1px.
    """
    # TODO: Put lot of exceptions handling

    # If there are x and y attributes, remember their values and remove them:
    if el.hasAttribute('x') and el.hasAttribute('y'):
        # Remember current coordinates of label
        # cc - current coordinates: coordinates before transformation
        # ( vector of three values: [x,y,1] )
        # p - temporary variable
        p=map( float , [ el.attributes['x'].value , el.attributes['y'].value ] )
        cc=np.array([ [p[0]] , [p[1]] , [1] ])
        # Remove current coordinates
        el.removeAttribute('x')
        el.removeAttribute('y')
    else:
        # If there are no x and y attributes, we assume that the initial
        # coordinates are (0,0), thus the homogeneous vector is (0,0,1).
        cc=np.array([[0],[0],[1]])

    # nc - New coordinates: coordinates after applying transformations
    nc=np.dot(cm,cc)

    # Update coordinates of the label
    el.setAttribute('x',str(nc[0][0]))
    el.setAttribute('y',str(nc[1][0]))

    # If element has 'font-size' attribute, update font size also
    if el.hasAttribute('font-size'):
        result=re_fontsizepx.search(el.attributes['font-size'].value)
        if result:
            CurrentFontSize=float(result.groups()[0])
            # New font-size value is:
            # (current font size) * (scale factor: cm[0][0])
            # NOTE(review): only the x-scale cm[0][0] is used -- assumes
            # uniform scaling; confirm for anisotropic transforms.
            # Round, convert to integer and then to string. A font size of
            # zero or less makes many parsers crash, so force at least 1px.
            NewFontSize=int(round(CurrentFontSize*cm[0][0]))
            if NewFontSize <= 1:
                el.attributes['font-size'].value='1px'
            else:
                el.attributes['font-size'].value=str(NewFontSize)+'px'
def __fixPolygon(el,cm):
    """
    Transform every vertex of a polygon element in place using C{cm}.

    @type el: DOM object
    @param el: Polygon SVG element to be modified.
    @type cm: NumPy array
    @param cm: Transformation matrix to be applied.
    @return: nothing - only element C{el} is modified.
    @requires: Coordinates in the 'points' attribute separated by
        whitespace and well formed, e.g. C{points='2.3,-5.0 34,5'}.
    """
    # Buffer holding the string assigned back to the 'points' attribute.
    buffer = ""
    try:
        # Split "points" attribute value in order to get the list of points.
        PointsTable = split(el.attributes['points'].value)
        for pt in PointsTable:
            # Extract the "x,y" pair, transform it through the matrix and
            # append the result (as strings) to the buffer.
            PtPair = list(re_PointsPair.search(pt).groups())
            buffer += "%s,%s " % tuple(map(str, __transformPoint(PtPair, cm)))
        # Update the 'points' attribute of the polygon element.
        el.attributes['points'].value = buffer
    except Exception:
        # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
        # still propagate; malformed polygons are reported and skipped.
        # The print() form works under both Python 2 and 3.
        print("Error parsing polygon element")
def __fixPolyline(el,cm):
    """
    Transform every vertex of a polyline element in place using C{cm}.

    @type el: DOM object
    @param el: Polyline SVG element to be modified.
    @type cm: NumPy array
    @param cm: Transformation matrix to be applied.
    @return: nothing - only element C{el} is modified.
    @requires: Coordinates in the 'points' attribute separated by
        whitespace and well formed, e.g. C{points='2.3,-5.0 34,5'}.
    @todo: In the final version polylines should be broken into individual
        lines, which matters for the later line <-> label assignment.
        Currently polylines are not broken into pieces.
    """
    # TODO: Break polylines into lines.
    pieces = []
    for token in split(el.attributes['points'].value):
        # Extract the "x,y" pair and map it through the current matrix.
        pair = list(re_PointsPair.search(token).groups())
        tx, ty = __transformPoint(pair, cm)
        # Keep the original output format: "x,y" followed by a space.
        pieces.append("%s,%s " % (str(tx), str(ty)))
    # Update the 'points' attribute of the polyline element.
    el.attributes['points'].value = "".join(pieces)
def __fixPath(el,cm):
    """
    Transform every coordinate of a path element in place using C{cm}.

    @type el: DOM object
    @param el: Path SVG element to be modified.
    @type cm: NumPy array
    @param cm: Transformation matrix to be applied.
    @return: nothing - only element C{el} is modified.
    """
    # Parse the 'd' attribute into segment entries; each entry is assumed
    # to be [command, flat_coordinate_list] (matches the original
    # ``segment[1:][0]`` access -- TODO confirm against svgpathparse).
    # The previous version also split the raw 'd' string into an unused
    # SegmentsTable variable; that dead code has been removed.
    pathPoints = parsePath(el.attributes['d'].value)
    for segment in pathPoints:
        coords = segment[1]
        # Transform the flat list pairwise: (x, y) at even offsets.
        for j in range(0, len(coords), 2):
            coords[j:j + 2] = __transformPoint(coords[j:j + 2], cm)
    # Serialise the transformed structure back into the 'd' attribute.
    el.attributes['d'].value = UnparsePath(pathPoints)
def __transformPoint(point,matrix):
    """
    Map a single 2-D point through a 3x3 homogeneous transformation.

    @type point: list
    @param point: Two-element sequence holding the x and y coordinates.
        Items may be strings, integers or floats; they are converted to
        float before any calculation.
    @type matrix: NumPy array 3x3
    @param matrix: Transformation matrix to be applied.
    @return: List of transformed coordinates (floats): [x', y']
    """
    # Build the homogeneous column vector [x, y, 1]^T. Explicit float()
    # conversion (instead of the former map()-then-index idiom) keeps the
    # function working under both Python 2 and Python 3.
    x, y = float(point[0]), float(point[1])
    # Multiply the vector by the transformation matrix.
    nc = np.dot(matrix, np.array([[x], [y], [1.0]]))
    # Extract and return the new cartesian coordinates.
    return [nc[0][0], nc[1][0]]
def __fixHeader(svgdom, pagenumber = None):
    """
    @type svgdom: DOM object
    @param svgdom: Whole SVG document
    @type pagenumber: integer
    @param pagenumber: Number of slide to parse
    @return: nothing, C{svgdom} is modified in-place.

    Changes viewPort, viewBox and other parameters of the SVG document in
    order to correct errors made by converters. Those properties have to be
    fixed before further operations.

    @note: New attributes and their values are defined manually in the
        configuration module, because they differ among atlases and
        (especially) converters.
    """
    # Overwrite selected attributes of the root <svg> element.
    svg=svgdom.getElementsByTagName('svg')[0]
    for attr in CONFIG_AttrToFixSVG.keys():
        if svg.hasAttribute(attr): svg.removeAttribute(attr)
        svg.setAttribute(attr,CONFIG_AttrToFixSVG[attr])

    for g in svgdom.getElementsByTagName('g'):
        # Select custom header.
        # The custom header depends on the slide number, as different slides
        # may be prepared in slightly different ways and may require some
        # custom changes.
        for rng in range(len(CONFIG_SPECIAL_SLIDE_RANGE)):
            if pagenumber in CONFIG_SPECIAL_SLIDE_RANGE[rng]:
                customAttribFix=CONFIG_AttrToFixG[rng]
        # The triple-quoted block below is a leftover debug dump (a no-op
        # string expression); kept as-is.
        """
        print pagenumber
        print customAttribFix
        print rng
        """
        # NOTE(review): if pagenumber matches no range above,
        # customAttribFix is unbound here and the loop below raises
        # NameError -- confirm every page is covered by
        # CONFIG_SPECIAL_SLIDE_RANGE.
        for attr in customAttribFix.keys():
            if g.hasAttribute(attr): g.removeAttribute(attr)
            g.setAttribute(attr,customAttribFix[attr])
def fixSvgImage(svgdoc, pagenumber=None, fixHeader=True):
    """
    Perform all operations related to fixing an SVG document in place.

    Fixes the header (optionally) and applies all nested transformations so
    that the child elements of each C{g} carry absolute coordinates.

    @type svgdoc: DOM object
    @param svgdoc: Whole SVG document.
    @type pagenumber: integer
    @param pagenumber: Number of slide to parse; if None, slide-specific
        header fixes are not selected inside __fixHeader.
    @type fixHeader: bool
    @param fixHeader: When True, rewrite the svg/g header attributes first.
    @return: nothing, C{svgdoc} is modified in-place.
    """
    # Fix viewbox if needed
    if fixHeader:
        __fixHeader(svgdoc, pagenumber)

    for g in svgdoc.getElementsByTagName('g'):
        # Get the group's transformation matrix (identity if absent) and
        # drop the attribute so the children end up with final coordinates.
        if g.hasAttribute("transform"):
            gmatrix = __getTransformMatrix(g.attributes['transform'].value)
            g.removeAttribute('transform')
        else:
            gmatrix = np.eye(3)

        # Transform elements one by one.
        # TODO: handle nested groups recursively.
        for svgElement in g.childNodes:
            try:
                __fixElement(svgElement, gmatrix)
            except Exception:
                # Best effort: non-element nodes (text nodes, comments) have
                # no attributes and are simply skipped. Narrowed from a bare
                # ``except`` so KeyboardInterrupt/SystemExit propagate.
                continue
if __name__ == '__main__':
    # Stand-alone usage: svgfix.py <input.svg> <output.svg>
    svgdom = dom.parse(sys.argv[1])
    fixSvgImage(svgdom)
    f=open(sys.argv[2],'w')
    svgdom.writexml(f)
    f.close()
| gpl-3.0 |
ehomeshasha/easydata | easydata/constant.py | 1 | 1220 | from django.utils.translation import ugettext_lazy as _
# Accepted MIME content types for each logical upload category.
CONTENT_TYPE = {
    'pdf': 'application/pdf',
    'image': ['image/png','image/jpeg','image/pjpeg','image/gif','image/bmp'],
    'file': ['application/x-compressed',
             'application/x-zip-compressed',
             'application/zip',
             'multipart/x-zip',
             'application/x-rar-compressed',
             'application/octet-stream',
             'text/plain',
             'multipart/x-gzip',
             'application/x-gzip',
             'application/x-compressed',
             'application/x-tar'
             ]
}

# Allowed file name extensions per upload category.
UPLOAD_EXT = {
    'pdf': 'pdf',
    'image': ['png','jpg','jpeg','gif','bmp'],
    'file': ['zip','rar','txt','gzip','gz','tar'],
}

# Target directory for uploaded PDFs awaiting pdf-to-html conversion.
PDF_UPLOAD_DIR = 'media_files/pdf2html/'

# Human-readable labels for content categories (lazily translated).
CTYPE_DICT = {
    'pdf': _('PDF'),
    'learning': _('Learning System'),
    'tiku': _('Tiku'),
}

# Root breadcrumb entry shared by all pages.
HOME_BREAD = {'text': _('Home'), 'href': '/'}

# Per-language UI settings: short display name and TinyMCE locale code.
LANGUAGE_DICT = {
    'en': {'shortname': _("en"), 'tinymce': 'en'},
    'zh-cn': {'shortname': _("zh"), 'tinymce': 'zh_CN'},
}

# Default page size for paginated listings.
PERPAGE = 20

# Message payload shown when a user lacks permission for a request.
PERMISSION_ERROR = {
    'level': 'ERROR',
    'title': _('Permission Error'),
    'body':_('You\'re not allowed to handle this request'),
}
| mit |
nlholdem/icodoom | .venv/lib/python2.7/site-packages/pbr/cmd/main.py | 34 | 3422 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import json
import sys
import pkg_resources
import pbr.version
def _get_metadata(package_name):
    """Load the pbr.json metadata recorded for an installed package.

    Raises Exception when *package_name* is not installed at all; returns
    the parsed metadata dict, or None when the package is installed but has
    no readable pbr.json.
    """
    try:
        return json.loads(
            pkg_resources.get_distribution(
                package_name).get_metadata('pbr.json'))
    except pkg_resources.DistributionNotFound:
        # Note: this raise is *not* intercepted by the handler below --
        # handlers of the same try statement do not catch each other.
        raise Exception('Package {0} not installed'.format(package_name))
    except Exception:
        return None
def get_sha(args):
    """Print the git sha recorded for ``args.name``, if there is one."""
    info = _get_info(args.name)
    if info['sha']:
        print(info['sha'])
def get_info(args):
    """Print a tab-separated name/version/released/sha summary line."""
    info = _get_info(args.name)
    print("{name}\t{version}\t{released}\t{sha}".format(**info))
def _get_info(name):
    """Return a dict with name/version/sha/released status for *name*.

    When pbr.json metadata is available it is authoritative; otherwise the
    release state and sha are inferred from the version string itself.
    """
    metadata = _get_metadata(name)
    version = pkg_resources.get_distribution(name).version
    if metadata:
        if metadata['is_release']:
            released = 'released'
        else:
            released = 'pre-release'
        sha = metadata['git_version']
    else:
        version_parts = version.split('.')
        if version_parts[-1].startswith('g'):
            # A trailing 'g<sha>' component marks a git snapshot build.
            sha = version_parts[-1][1:]
            released = 'pre-release'
        else:
            sha = ""
            released = "released"
            # Any non-numeric component (e.g. 'dev4', 'rc1') means the
            # version is not a final release.
            for part in version_parts:
                if not part.isdigit():
                    released = "pre-release"
    return dict(name=name, version=version, sha=sha, released=released)
def freeze(args):
    """Print one ``name==version`` line per installed distribution.

    Distributions are listed case-insensitively by project name; when a
    git sha is known it is appended as a trailing comment.
    """
    by_name = sorted(pkg_resources.working_set,
                     key=lambda dist: dist.project_name.lower())
    for dist in by_name:
        info = _get_info(dist.project_name)
        line = "{name}=={version}".format(**info)
        if info['sha']:
            line += " # git sha {sha}".format(**info)
        print(line)
def main():
    """Entry point for the ``pbr`` command line tool."""
    parser = argparse.ArgumentParser(
        description='pbr: Python Build Reasonableness')
    parser.add_argument(
        '-v', '--version', action='version',
        version=str(pbr.version.VersionInfo('pbr')))

    subparsers = parser.add_subparsers(
        title='commands', description='valid commands', help='additional help')

    # Each sub-command is wired to its handler via set_defaults(func=...).
    sha_parser = subparsers.add_parser('sha', help='print sha of package')
    sha_parser.set_defaults(func=get_sha)
    sha_parser.add_argument('name', help='package to print sha of')

    info_parser = subparsers.add_parser(
        'info', help='print version info for package')
    info_parser.set_defaults(func=get_info)
    info_parser.add_argument('name', help='package to print info of')

    freeze_parser = subparsers.add_parser(
        'freeze', help='print version info for all installed packages')
    freeze_parser.set_defaults(func=freeze)

    args = parser.parse_args()
    try:
        args.func(args)
    except Exception as e:
        print(e)
| gpl-3.0 |
google/llvm-propeller | lldb/test/API/functionalities/gdb_remote_client/TestRegDefinitionInParts.py | 8 | 7315 | from __future__ import print_function
import lldb
import time
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
from gdbclientutils import *
class TestRegDefinitionInParts(GDBRemoteTestBase):

    @skipIfXmlSupportMissing
    @skipIfRemote
    def test(self):
        """
        Test that lldb correctly fetches the target definition file
        in multiple chunks if the remote server only provides the
        content in small parts, and the small parts it provides is
        smaller than the maximum packet size that it declared at
        the start of the debug session. qemu does this.
        """
        # Mock server: serves the register definition XML in two chunks,
        # split at offset 2045 in the middle of the 'ss' register's
        # ``type`` attribute, to force a multi-part qXfer read.
        class MyResponder(MockGDBServerResponder):
            def qXferRead(self, obj, annex, offset, length):
                if annex == "target.xml":
                    return """<?xml version="1.0"?>
<!DOCTYPE feature SYSTEM "gdb-target.dtd">
<target version="1.0">
<architecture>i386:x86-64</architecture>
<xi:include href="i386-64bit-core.xml"/>
</target>""", False
                if annex == "i386-64bit-core.xml" and offset == 0:
                    # First chunk: ends mid-attribute ('ty' of 'type'); the
                    # trailing True signals more data is available.
                    return """<?xml version="1.0"?>
<!-- Copyright (C) 2010-2015 Free Software Foundation, Inc.
Copying and distribution of this file, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. -->
<!DOCTYPE feature SYSTEM "gdb-target.dtd">
<feature name="org.gnu.gdb.i386.core">
<flags id="i386_eflags" size="4">
<field name="CF" start="0" end="0"/>
<field name="" start="1" end="1"/>
<field name="PF" start="2" end="2"/>
<field name="AF" start="4" end="4"/>
<field name="ZF" start="6" end="6"/>
<field name="SF" start="7" end="7"/>
<field name="TF" start="8" end="8"/>
<field name="IF" start="9" end="9"/>
<field name="DF" start="10" end="10"/>
<field name="OF" start="11" end="11"/>
<field name="NT" start="14" end="14"/>
<field name="RF" start="16" end="16"/>
<field name="VM" start="17" end="17"/>
<field name="AC" start="18" end="18"/>
<field name="VIF" start="19" end="19"/>
<field name="VIP" start="20" end="20"/>
<field name="ID" start="21" end="21"/>
</flags>
<reg name="rax" bitsize="64" type="int64"/>
<reg name="rbx" bitsize="64" type="int64"/>
<reg name="rcx" bitsize="64" type="int64"/>
<reg name="rdx" bitsize="64" type="int64"/>
<reg name="rsi" bitsize="64" type="int64"/>
<reg name="rdi" bitsize="64" type="int64"/>
<reg name="rbp" bitsize="64" type="data_ptr"/>
<reg name="rsp" bitsize="64" type="data_ptr"/>
<reg name="r8" bitsize="64" type="int64"/>
<reg name="r9" bitsize="64" type="int64"/>
<reg name="r10" bitsize="64" type="int64"/>
<reg name="r11" bitsize="64" type="int64"/>
<reg name="r12" bitsize="64" type="int64"/>
<reg name="r13" bitsize="64" type="int64"/>
<reg name="r14" bitsize="64" type="int64"/>
<reg name="r15" bitsize="64" type="int64"/>
<reg name="rip" bitsize="64" type="code_ptr"/>
<reg name="eflags" bitsize="32" type="i386_eflags"/>
<reg name="cs" bitsize="32" type="int32"/>
<reg name="ss" bitsize="32" ty""", True
                if annex == "i386-64bit-core.xml" and offset == 2045:
                    # Second chunk: continues exactly where the first left
                    # off ('pe="int32"' completes the split attribute).
                    return """pe="int32"/>
<reg name="ds" bitsize="32" type="int32"/>
<reg name="es" bitsize="32" type="int32"/>
<reg name="fs" bitsize="32" type="int32"/>
<reg name="gs" bitsize="32" type="int32"/>
<reg name="st0" bitsize="80" type="i387_ext"/>
<reg name="st1" bitsize="80" type="i387_ext"/>
<reg name="st2" bitsize="80" type="i387_ext"/>
<reg name="st3" bitsize="80" type="i387_ext"/>
<reg name="st4" bitsize="80" type="i387_ext"/>
<reg name="st5" bitsize="80" type="i387_ext"/>
<reg name="st6" bitsize="80" type="i387_ext"/>
<reg name="st7" bitsize="80" type="i387_ext"/>
<reg name="fctrl" bitsize="32" type="int" group="float"/>
<reg name="fstat" bitsize="32" type="int" group="float"/>
<reg name="ftag" bitsize="32" type="int" group="float"/>
<reg name="fiseg" bitsize="32" type="int" group="float"/>
<reg name="fioff" bitsize="32" type="int" group="float"/>
<reg name="foseg" bitsize="32" type="int" group="float"/>
<reg name="fooff" bitsize="32" type="int" group="float"/>
<reg name="fop" bitsize="32" type="int" group="float"/>
</feature>""", False
                return None, False

            def readRegister(self, regnum):
                return ""

            # Canned 'g' packet payload: little-endian hex dump of all
            # register values the test reads back (rip, ss, ...).
            def readRegisters(self):
                return "0600000000000000c0b7c00080fffffff021c60080ffffff1a00000000000000020000000000000078b7c00080ffffff203f8ca090ffffff103f8ca090ffffff3025990a80ffffff809698000000000070009f0a80ffffff020000000000000000eae10080ffffff00000000000000001822d74f1a00000078b7c00080ffffff0e12410080ffff004602000011111111222222223333333300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007f0300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000801f0000"

            def haltReason(self):
                return "T02thread:dead;threads:dead;"

            def qfThreadInfo(self):
                return "mdead"

            def qC(self):
                return ""

            def qSupported(self, client_supported):
                # Advertise a large packet size so the small qXfer chunks
                # are smaller than the declared maximum (the qemu case).
                return "PacketSize=1000;qXfer:features:read+"

            def QThreadSuffixSupported(self):
                return "OK"

            def QListThreadsInStopReply(self):
                return "OK"

        self.server.responder = MyResponder()
        if self.TraceOn():
            self.runCmd("log enable gdb-remote packets")
            # NOTE(review): unconditional 10 second delay -- looks like a
            # debugging leftover; confirm whether it is still needed.
            time.sleep(10)
            self.addTearDownHook(
                lambda: self.runCmd("log disable gdb-remote packets"))
        target = self.dbg.CreateTargetWithFileAndArch(None, None)
        process = self.connect(target)

        if self.TraceOn():
            interp = self.dbg.GetCommandInterpreter()
            result = lldb.SBCommandReturnObject()
            interp.HandleCommand("target list", result)
            print(result.GetOutput())

        rip_valobj = process.GetThreadAtIndex(0).GetFrameAtIndex(0).FindRegister("rip")
        self.assertEqual(rip_valobj.GetValueAsUnsigned(), 0x00ffff800041120e)
        # The 'ss' definition straddles the 2045-byte chunk boundary, so
        # reading it verifies that both qXfer parts were stitched correctly.
        ss_valobj = process.GetThreadAtIndex(0).GetFrameAtIndex(0).FindRegister("ss")
        self.assertEqual(ss_valobj.GetValueAsUnsigned(), 0x22222222)

        if self.TraceOn():
            print("rip is 0x%x" % rip_valobj.GetValueAsUnsigned())
            print("ss is 0x%x" % ss_valobj.GetValueAsUnsigned())
| apache-2.0 |
zephyrplugins/zephyr | zephyr.plugin.jython/jython2.5.2rc3/Lib/dummy_threading.py | 102 | 2900 | """Faux ``threading`` version using ``dummy_thread`` instead of ``thread``.
The module ``_dummy_threading`` is added to ``sys.modules`` in order
to not have ``threading`` considered imported. Had ``threading`` been
directly imported it would have made all subsequent imports succeed
regardless of whether ``thread`` was available which is not desired.
:Author: Brett Cannon
:Contact: brett@python.org
XXX: Try to get rid of ``_dummy_threading``.
"""
from sys import modules as sys_modules

import dummy_thread

# Declaring now so as to not have to nest ``try``s to get proper clean-up.
# These flags record which real modules were displaced so the ``finally``
# clause below knows exactly what to restore.
holding_thread = False
holding_threading = False
holding__threading_local = False

try:
    # Could have checked if ``thread`` was not in sys.modules and gone
    # a different route, but decided to mirror technique used with
    # ``threading`` below.
    if 'thread' in sys_modules:
        held_thread = sys_modules['thread']
        holding_thread = True
    # Must have some module named ``thread`` that implements its API
    # in order to initially import ``threading``.
    sys_modules['thread'] = sys_modules['dummy_thread']

    if 'threading' in sys_modules:
        # If ``threading`` is already imported, might as well prevent
        # trying to import it more than needed by saving it if it is
        # already imported before deleting it.
        held_threading = sys_modules['threading']
        holding_threading = True
        del sys_modules['threading']

    if '_threading_local' in sys_modules:
        # If ``_threading_local`` is already imported, might as well prevent
        # trying to import it more than needed by saving it if it is
        # already imported before deleting it.
        held__threading_local = sys_modules['_threading_local']
        holding__threading_local = True
        del sys_modules['_threading_local']

    import threading
    # Need a copy of the code kept somewhere...
    # Stash the freshly imported (thread-less) modules under the ``_dummy``
    # names, then pull this module's public API from that copy.
    sys_modules['_dummy_threading'] = sys_modules['threading']
    del sys_modules['threading']
    sys_modules['_dummy__threading_local'] = sys_modules['_threading_local']
    del sys_modules['_threading_local']
    from _dummy_threading import *
    from _dummy_threading import __all__

finally:
    # Put back ``threading`` if we overwrote earlier

    if holding_threading:
        sys_modules['threading'] = held_threading
        del held_threading
    del holding_threading

    # Put back ``_threading_local`` if we overwrote earlier

    if holding__threading_local:
        sys_modules['_threading_local'] = held__threading_local
        del held__threading_local
    del holding__threading_local

    # Put back ``thread`` if we overwrote, else del the entry we made

    if holding_thread:
        sys_modules['thread'] = held_thread
        del held_thread
    else:
        del sys_modules['thread']
    del holding_thread

    # Clean up the module namespace: these names are bookkeeping only and
    # must not leak as public attributes of ``dummy_threading``.
    del dummy_thread
    del sys_modules
| epl-1.0 |
RonnyPfannschmidt/pytest | testing/test_pastebin.py | 3 | 6207 | import io
from typing import List
from typing import Union
import pytest
from _pytest.monkeypatch import MonkeyPatch
from _pytest.pytester import Pytester
class TestPasteCapture:
    """Tests for capturing test output destined for the pastebin plugin."""

    @pytest.fixture
    def pastebinlist(self, monkeypatch, request) -> List[Union[str, bytes]]:
        """Intercept the plugin's create_new_paste and collect its payloads."""
        pastebinlist: List[Union[str, bytes]] = []
        plugin = request.config.pluginmanager.getplugin("pastebin")
        monkeypatch.setattr(plugin, "create_new_paste", pastebinlist.append)
        return pastebinlist

    def test_failed(self, pytester: Pytester, pastebinlist) -> None:
        """--pastebin=failed pastes only the failing test's output."""
        testpath = pytester.makepyfile(
            """
            import pytest
            def test_pass() -> None:
                pass

            def test_fail():
                assert 0

            def test_skip():
                pytest.skip("")
            """
        )
        reprec = pytester.inline_run(testpath, "--pastebin=failed")
        # Exactly one paste was made, and it contains the failing test only.
        assert len(pastebinlist) == 1
        s = pastebinlist[0]
        assert s.find("def test_fail") != -1
        assert reprec.countoutcomes() == [1, 1, 1]

    def test_all(self, pytester: Pytester, pastebinlist) -> None:
        """--pastebin=all pastes the entire (verbose) session output."""
        from _pytest.pytester import LineMatcher

        testpath = pytester.makepyfile(
            """
            import pytest
            def test_pass():
                pass

            def test_fail():
                assert 0

            def test_skip():
                pytest.skip("")
            """
        )
        reprec = pytester.inline_run(testpath, "--pastebin=all", "-v")
        assert reprec.countoutcomes() == [1, 1, 1]
        assert len(pastebinlist) == 1
        # The pasted payload is bytes; all three outcomes must appear in it.
        contents = pastebinlist[0].decode("utf-8")
        matcher = LineMatcher(contents.splitlines())
        matcher.fnmatch_lines(
            [
                "*test_pass PASSED*",
                "*test_fail FAILED*",
                "*test_skip SKIPPED*",
                "*== 1 failed, 1 passed, 1 skipped in *",
            ]
        )

    def test_non_ascii_paste_text(self, pytester: Pytester, pastebinlist) -> None:
        """Make sure that text which contains non-ascii characters is pasted
        correctly. See #1219.
        """
        pytester.makepyfile(
            test_unicode="""\
            def test():
                assert '☺' == 1
            """
        )
        result = pytester.runpytest("--pastebin=all")
        expected_msg = "*assert '☺' == 1*"
        result.stdout.fnmatch_lines(
            [
                expected_msg,
                "*== 1 failed in *",
                "*Sending information to Paste Service*",
            ]
        )
        assert len(pastebinlist) == 1
class TestPaste:
    """Tests for the HTTP interaction with the bpaste.net service."""

    @pytest.fixture
    def pastebin(self, request):
        """Return the registered pastebin plugin instance."""
        return request.config.pluginmanager.getplugin("pastebin")

    @pytest.fixture
    def mocked_urlopen_fail(self, monkeypatch: MonkeyPatch):
        """Monkeypatch the actual urlopen call to emulate a HTTP Error 400."""
        calls = []

        import urllib.error
        import urllib.request

        def mocked(url, data):
            calls.append((url, data))
            raise urllib.error.HTTPError(url, 400, "Bad request", {}, io.BytesIO())

        monkeypatch.setattr(urllib.request, "urlopen", mocked)
        return calls

    @pytest.fixture
    def mocked_urlopen_invalid(self, monkeypatch: MonkeyPatch):
        """Monkeypatch the actual urlopen calls done by the internal plugin
        function that connects to bpaste service, but return a url in an
        unexpected format."""
        calls = []

        def mocked(url, data):
            calls.append((url, data))

            class DummyFile:
                def read(self):
                    # part of html of a normal response
                    return b'View <a href="/invalid/3c0c6750bd">raw</a>.'

            return DummyFile()

        import urllib.request

        monkeypatch.setattr(urllib.request, "urlopen", mocked)
        return calls

    @pytest.fixture
    def mocked_urlopen(self, monkeypatch: MonkeyPatch):
        """Monkeypatch the actual urlopen calls done by the internal plugin
        function that connects to bpaste service."""
        calls = []

        def mocked(url, data):
            calls.append((url, data))

            class DummyFile:
                def read(self):
                    # part of html of a normal response
                    return b'View <a href="/raw/3c0c6750bd">raw</a>.'

            return DummyFile()

        import urllib.request

        monkeypatch.setattr(urllib.request, "urlopen", mocked)
        return calls

    def test_pastebin_invalid_url(self, pastebin, mocked_urlopen_invalid) -> None:
        """A response without a /raw/ link yields an error string, not a URL."""
        result = pastebin.create_new_paste(b"full-paste-contents")
        assert (
            result
            == "bad response: invalid format ('View <a href=\"/invalid/3c0c6750bd\">raw</a>.')"
        )
        assert len(mocked_urlopen_invalid) == 1

    def test_pastebin_http_error(self, pastebin, mocked_urlopen_fail) -> None:
        """An HTTPError from the service is reported in the return value."""
        result = pastebin.create_new_paste(b"full-paste-contents")
        assert result == "bad response: HTTP Error 400: Bad request"
        assert len(mocked_urlopen_fail) == 1

    def test_create_new_paste(self, pastebin, mocked_urlopen) -> None:
        """A well-formed response is rewritten into a /show/ URL and the
        request carries the expected form fields."""
        result = pastebin.create_new_paste(b"full-paste-contents")
        assert result == "https://bpaste.net/show/3c0c6750bd"
        assert len(mocked_urlopen) == 1
        url, data = mocked_urlopen[0]
        assert type(data) is bytes
        lexer = "text"
        assert url == "https://bpaste.net"
        assert "lexer=%s" % lexer in data.decode()
        assert "code=full-paste-contents" in data.decode()
        assert "expiry=1week" in data.decode()

    def test_create_new_paste_failure(self, pastebin, monkeypatch: MonkeyPatch) -> None:
        """An unparseable response body is surfaced as an error string."""
        import io
        import urllib.request

        def response(url, data):
            stream = io.BytesIO(b"something bad occurred")
            return stream

        monkeypatch.setattr(urllib.request, "urlopen", response)
        result = pastebin.create_new_paste(b"full-paste-contents")
        assert result == "bad response: invalid format ('something bad occurred')"
| mit |
dcroc16/skunk_works | google_appengine/google/appengine/tools/download_appstats.py | 1 | 6069 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Script for downloading Appstats data using remote_api.
Usage:
%prog [-s HOSTNAME] [-p PATH] [-o OUTPUTFILE] [-j] [-q] [-m] [APPID]
If the -s HOSTNAME flag is not specified, the APPID must be specified.
"""
from google.appengine.tools import os_compat
import getpass
import logging
import optparse
import os
import sys
from google.appengine.ext.appstats import loader
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.tools import appengine_rpc
DEFAULT_PATH_PYTHON = '/_ah/remote_api'
DEFAULT_PATH_JAVA = '/remote_api'
DEFAULT_FILE = 'appstats.pkl'
def auth_func():
  """Prompt the user for credentials and return an (email, password) pair.

  The email is read from stdin; the password is read without echo via
  getpass.
  """
  email = raw_input('Email: ')
  password = getpass.getpass('Password: ')
  return (email, password)
def download_appstats(servername, appid, path, secure,
                      rpc_server_factory, filename, appdir,
                      merge, java_application):
  """Invoke remote_api to download appstats data.

  Args:
    servername: Hostname the app is deployed on.
    appid: Application id; if falsy, APPLICATION_ID from the environment
      (set by the remote_api handshake) is used instead.
    path: Path to the remote_api handler on the server.
    secure: If True, communicate with the server over HTTPS.
    rpc_server_factory: Factory passed through to ConfigureRemoteApi to
      build the RPC server.
    filename: Output file for the downloaded data.
    appdir: Application directory, searched for appengine_config.py.
    merge: If True, merge downloaded records into an existing output file.
    java_application: True if downloading from a Java application.
  """
  # Make the app's own appengine_config importable so its settings apply;
  # a missing module is tolerated with a warning.
  if os.path.isdir(appdir):
    sys.path.insert(0, appdir)
    try:
      logging.info('Importing appengine_config from %s', appdir)
      import appengine_config
    except ImportError, err:
      logging.warn('Failed to load appengine_config: %s', err)
  remote_api_stub.ConfigureRemoteApi(appid, path, auth_func,
                                     servername=servername,
                                     save_cookies=True, secure=secure,
                                     rpc_server_factory=rpc_server_factory)
  remote_api_stub.MaybeInvokeAuthentication()

  os.environ['SERVER_SOFTWARE'] = 'Development (remote_api_shell)/1.0'

  if not appid:
    # The remote_api handshake exports the app id into the environment.
    appid = os.environ['APPLICATION_ID']
  download_data(filename, merge, java_application)
def download_data(filename, merge, java_application):
  """Download appstats data from memcache and write it to filename.

  Args:
    filename: Path of the output pickle file.
    merge: If True and filename exists, merge the newly downloaded records
      with the records already on disk instead of overwriting them.
    java_application: True if downloading from a Java application.
  """
  oldrecords = []
  if merge:
    try:
      oldfile = open(filename, 'rb')
    except IOError:
      logging.info('No file to merge. Creating new file %s',
                   filename)
    else:
      logging.info('Merging with existing file %s', filename)
      # try/finally ensures the handle is closed even if unpickling fails
      # (the previous version leaked it on error).
      try:
        oldrecords = loader.UnpickleFromFile(oldfile)
      finally:
        oldfile.close()
  if oldrecords:
    # Records are newest-first; only fetch records newer than the most
    # recent one already on disk.
    last_timestamp = oldrecords[0].start_timestamp_milliseconds()
    records = loader.FromMemcache(filter_timestamp=last_timestamp,
                                  java_application=java_application)
  else:
    records = loader.FromMemcache(java_application=java_application)
  merged_records = records + oldrecords
  try:
    outfile = open(filename, 'wb')
  except IOError:
    logging.error('Cannot open %s', filename)
    return
  # Close the output file even if pickling raises part-way through.
  try:
    loader.PickleToFile(merged_records, outfile)
  finally:
    outfile.close()
def main(argv):
  """Parse arguments and run shell.

  Accepts up to two positional arguments: APPID (or, with -s, the bare app
  id) and optionally the remote_api PATH. Exits with status 1 on usage
  errors.
  """
  parser = optparse.OptionParser(usage=__doc__)
  parser.add_option('-s', '--server', dest='server',
                    help='The hostname your app is deployed on. '
                         'Defaults to <app_id>.appspot.com.')
  parser.add_option('-o', '--output', dest='filename', default=DEFAULT_FILE,
                    help='The file to which Appstats data must '
                         'be downloaded. A .pkl extension is '
                         'recommended. Defaults to %s.' % DEFAULT_FILE)
  parser.add_option('-p', '--path', dest='path',
                    help='The path on the server to the remote_api handler. '
                         'Defaults to %s for python and %s for java. '
                         % (DEFAULT_PATH_PYTHON, DEFAULT_PATH_JAVA))
  parser.add_option('-q', '--quiet',
                    action='store_false', dest='verbose', default=True,
                    help='do not print download status messages to stdout')
  parser.add_option('-j', '--java',
                    action='store_true', dest='java_application', default=False,
                    help='set this for downloading from a java application')
  parser.add_option('-m', '--merge',
                    action='store_true', dest='merge', default=False,
                    help='if file exists, merge rather than overwrite')
  parser.add_option('--secure', dest='secure', action='store_true',
                    default=False, help='Use HTTPS when communicating '
                                        'with the server.')
  parser.add_option('--appdir', dest='appdir', action='store', default='.',
                    help='application directory, for finding '
                         'appengine_config.py. Defaults to ".".')
  (options, args) = parser.parse_args()
  # Usage errors: need a server or an app id; at most two positionals; the
  # path may not be given both as an option and as a positional.
  if ((not options.server and not args) or len(args) > 2
      or (options.path and len(args) > 1)):
    parser.print_usage(sys.stderr)
    if len(args) > 2:
      print >> sys.stderr, 'Unexpected arguments: %s' % args[2:]
    elif options.path and len(args) > 1:
      print >> sys.stderr, 'Path specified twice.'
    sys.exit(1)
  servername = options.server
  appid = None
  # Pick the handler path appropriate for the runtime unless overridden.
  if options.java_application:
    default_path = DEFAULT_PATH_JAVA
  else:
    default_path = DEFAULT_PATH_PYTHON
  path = options.path or default_path
  if args:
    # With -s the positional is the app id; without it, the positional is
    # used to derive the default <app_id>.appspot.com hostname.
    if servername:
      appid = args[0]
    else:
      servername = '%s.appspot.com' % args[0]
    if len(args) == 2:
      path = args[1]
  if options.verbose:
    logging.getLogger().setLevel(logging.INFO)
  download_appstats(servername, appid, path, options.secure,
                    appengine_rpc.HttpRpcServer, options.filename,
                    options.appdir, options.merge, options.java_application)


if __name__ == '__main__':
  main(sys.argv)
| mit |
dbader/envconfig | test_envconfig.py | 1 | 2265 | # coding=utf8
"""Unit tests for the config module."""
# Silence "missing docstring", "method could be a function", and
# "too many public methods" messages:
# pylint: disable-msg=R0201,C0111,R0904
import unittest
import os
import envconfig
class TestConfig(unittest.TestCase):
    """Unit tests for the envconfig typed environment-variable accessors.

    Each test seeds os.environ with a raw string and asserts that envconfig
    parses it into the expected Python value.
    """

    def assert_get_set_bool(self, value, expected_value):
        """Store value in TEST_BOOLEAN_VALUE and assert the parsed bool."""
        os.environ['TEST_BOOLEAN_VALUE'] = value
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(envconfig.bool('TEST_BOOLEAN_VALUE'), expected_value)

    def test_getstr(self):
        os.environ['TEST_VALUE_STR'] = 'Hello, World'
        self.assertEqual(envconfig.str('TEST_VALUE_STR'), 'Hello, World')

    def test_getstr_strip_whitespace(self):
        os.environ['TEST_VALUE_STR'] = ' hello '
        self.assertEqual(envconfig.str('TEST_VALUE_STR'), 'hello')

    def test_getint(self):
        os.environ['TEST_VALUE_INT'] = '12345'
        self.assertEqual(envconfig.int('TEST_VALUE_INT'), 12345)

    def test_getbool(self):
        # Truthy spellings: case-insensitive yes/true/1, with whitespace.
        self.assert_get_set_bool('yes', True)
        self.assert_get_set_bool('1', True)
        self.assert_get_set_bool('YeS', True)
        self.assert_get_set_bool('True', True)
        self.assert_get_set_bool('true', True)
        self.assert_get_set_bool(' 1 ', True)
        self.assert_get_set_bool('YES\t', True)
        self.assert_get_set_bool('\tYES\t', True)
        # Falsy spellings.
        self.assert_get_set_bool('false', False)
        self.assert_get_set_bool('no', False)
        self.assert_get_set_bool('0', False)
        self.assert_get_set_bool(' NO ', False)

    def test_getinvalid(self):
        # pop() with a default replaces the membership-test + del two-step.
        os.environ.pop('DOES_NOT_EXIST', None)
        with self.assertRaises(KeyError):
            envconfig.str('DOES_NOT_EXIST')

    def test_invalid_bool(self):
        os.environ['INVALID_BOOL'] = 'nope'
        with self.assertRaises(ValueError):
            envconfig.bool('INVALID_BOOL')

    def test_getlist(self):
        # Default separator is ',' and items are whitespace-stripped.
        os.environ['LIST_TEST'] = 'one,two, three ,four '
        self.assertEqual(envconfig.list('LIST_TEST'),
                         ['one', 'two', 'three', 'four'])
        os.environ['LIST_TEST'] = 'one#two# three #four '
        self.assertEqual(envconfig.list('LIST_TEST', separator='#'),
                         ['one', 'two', 'three', 'four'])
EduPepperPDTesting/pepper2013-testing | lms/djangoapps/certificates/migrations/0009_auto__del_field_generatedcertificate_graded_download_url__del_field_ge.py | 188 | 6118 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # Auto-generated South schema migration: drops the graded_* columns,
    # adds the 'distinction' flag, and enforces (user, course_id)
    # uniqueness on GeneratedCertificate. Generated code — do not restyle.

    def forwards(self, orm):
        # Deleting field 'GeneratedCertificate.graded_download_url'
        db.delete_column('certificates_generatedcertificate', 'graded_download_url')

        # Deleting field 'GeneratedCertificate.graded_certificate_id'
        db.delete_column('certificates_generatedcertificate', 'graded_certificate_id')

        # Adding field 'GeneratedCertificate.distinction'
        db.add_column('certificates_generatedcertificate', 'distinction',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)

        # Adding unique constraint on 'GeneratedCertificate', fields ['course_id', 'user']
        db.create_unique('certificates_generatedcertificate', ['course_id', 'user_id'])

    def backwards(self, orm):
        # Removing unique constraint on 'GeneratedCertificate', fields ['course_id', 'user']
        # (constraints must be dropped before re-adding the old columns)
        db.delete_unique('certificates_generatedcertificate', ['course_id', 'user_id'])

        # Adding field 'GeneratedCertificate.graded_download_url'
        db.add_column('certificates_generatedcertificate', 'graded_download_url',
                      self.gf('django.db.models.fields.CharField')(default=False, max_length=128),
                      keep_default=False)

        # Adding field 'GeneratedCertificate.graded_certificate_id'
        db.add_column('certificates_generatedcertificate', 'graded_certificate_id',
                      self.gf('django.db.models.fields.CharField')(default=False, max_length=32),
                      keep_default=False)

        # Deleting field 'GeneratedCertificate.distinction'
        db.delete_column('certificates_generatedcertificate', 'distinction')

    # Frozen ORM snapshot used by South to build the 'orm' object above.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'certificates.generatedcertificate': {
            'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'GeneratedCertificate'},
            'certificate_id': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '32'}),
            'course_id': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '255'}),
            'distinction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'download_url': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '128'}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'grade': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '5'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '32'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['certificates']
| agpl-3.0 |
severinson/Coded-Shuffling | tests/simulationtests.py | 2 | 4673 | ############################################################################
# Copyright 2016 Albin Severinson #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
'''This module contains tests of the simulation module.
'''
import os
import math
import unittest
import tempfile
import pandas as pd
import simulation
from functools import partial
from model import SystemParameters
from solvers.heuristicsolver import HeuristicSolver
from evaluation.binsearch import SampleEvaluator
class EvaluationTests(unittest.TestCase):
'''Tests of the simulation module.'''
def verify_result(self, result, correct_result, delta=0.1):
'''Check the results against known correct results.
Args:
result: Measured result.
correct_result: Dict with correct results.
delta: Correct result must be within a delta fraction of the
measured result.
'''
for key, value in correct_result.items():
if value == math.inf:
self.assertAlmostEqual(result[key].mean(), value, places=1,
msg='key={}, value={}'.format(str(key), str(value)))
else:
self.assertAlmostEqual(result[key].mean(), value, delta=value*delta,
msg='key={}, value={}'.format(str(key), str(value)))
def verify_solver(self, solver, parameters, correct_results):
'''Check the results from evaluating the assignment produced by some
solver against known correct results.
Args:
solver: Assignment solver.
parameters: System parameters.
correct_results: List of dicts with correct results.
'''
evaluator = binsearch.SampleEvaluator(num_samples=1000)
for par, correct_result in zip(parameters, correct_results):
assignment = solver.solve(par)
self.assertTrue(assignment.is_valid())
result = evaluator.evaluate(par, assignment)
self.verify_result(result, correct_result)
return
def test_simulation(self):
'''Test basic functionality.'''
parameters = SystemParameters(rows_per_batch=5, num_servers=10, q=9, num_outputs=9,
server_storage=1/3, num_partitions=5)
correct = {'servers': 9, 'batches': 324, 'delay': 25.460714285714285/9,
'unicast_load_1': 720/540/9, 'multicast_load_1': 840/540/9,
'unicast_load_2': 0, 'multicast_load_2': 1470/540/9}
solver = HeuristicSolver()
evaluator = SampleEvaluator(num_samples=1000)
with tempfile.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, parameters.identifier() + '.csv')
dataframe = simulation.simulate(
parameters,
directory=tmpdir,
rerun=False,
samples=10,
solver=solver,
assignment_eval=evaluator,
)
self.verify_result(dataframe, correct)
simulate_fun = partial(
simulation.simulate,
directory=tmpdir,
rerun=False,
samples=10,
solver=solver,
assignment_eval=evaluator,
)
dataframe = simulation.simulate_parameter_list(
parameter_list=[parameters],
simulate_fun=simulate_fun,
map_complexity_fun=lambda x: 1,
encode_delay_fun=lambda x: 0,
reduce_delay_fun=lambda x: 0,
)
self.verify_result(dataframe, correct)
return
| apache-2.0 |
pgonda/servo | python/mach/mach/logging.py | 125 | 8070 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# This file contains logging functionality for mach. It essentially provides
# support for a structured logging framework built on top of Python's built-in
# logging framework.
from __future__ import absolute_import, unicode_literals
try:
import blessings
except ImportError:
blessings = None
import json
import logging
import sys
import time
def format_seconds(total):
    """Format number of seconds to MM:SS.DD form."""
    # divmod yields (whole minutes, remaining seconds) in one step.
    return '%2d:%05.2f' % divmod(total, 60)
class ConvertToStructuredFilter(logging.Filter):
    """Filter that converts unstructured records into structured ones."""

    def filter(self, record):
        # A record is already structured when both attributes are present;
        # otherwise wrap the formatted message in the structured shape.
        already_structured = hasattr(record, 'action') and hasattr(record, 'params')
        if not already_structured:
            record.action = 'unstructured'
            record.params = {'msg': record.getMessage()}
            record.msg = '{msg}'
        return True
class StructuredJSONFormatter(logging.Formatter):
    """Log formatter that writes a structured JSON entry."""

    def format(self, record):
        # Serialized as [timestamp, action, params]; records without the
        # structured attributes fall back to 'UNKNOWN' / {}.
        payload = [
            record.created,
            getattr(record, 'action', 'UNKNOWN'),
            getattr(record, 'params', {}),
        ]
        return json.dumps(payload)
class StructuredHumanFormatter(logging.Formatter):
    """Log formatter that writes structured messages for humans.

    It is important that this formatter never be added to a logger that
    produces unstructured/classic log messages. If it is, the call to format()
    could fail because the string could contain things (like JSON) that look
    like formatting character sequences.

    Because of this limitation, format() will fail with a KeyError if an
    unstructured record is passed or if the structured message is malformed.
    """

    def __init__(self, start_time, write_interval=False, write_times=True):
        self.start_time = start_time
        self.write_interval = write_interval
        self.write_times = write_times
        self.last_time = None

    def format(self, record):
        message = record.msg.format(**record.params)
        if self.write_times:
            return '%s %s' % (format_seconds(self._time(record)), message)
        return message

    def _time(self, record):
        # Elapsed time is measured either from the previous record
        # (interval mode) or from the formatter's start time.
        if self.write_interval and self.last_time is not None:
            elapsed = record.created - self.last_time
        else:
            elapsed = record.created - self.start_time
        self.last_time = record.created
        return elapsed
class StructuredTerminalFormatter(StructuredHumanFormatter):
    """Log formatter for structured messages writing to a terminal."""

    def set_terminal(self, terminal):
        """Attach the blessings terminal used for colorizing output."""
        self.terminal = terminal

    def format(self, record):
        f = record.msg.format(**record.params)
        if not self.write_times:
            return f
        t = self.terminal.blue(format_seconds(self._time(record)))
        return '%s %s' % (t, self._colorize(f))

    def _colorize(self, s):
        """Color well-known test-harness prefixes; return s otherwise."""
        if not self.terminal:
            return s

        reftest = s.startswith('REFTEST ')
        if reftest:
            s = s[8:]

        # Bug fix: capture the default *after* stripping the 'REFTEST '
        # prefix. Previously ``result`` was taken from the unstripped
        # string, so a REFTEST line without a recognized TEST-* marker was
        # returned with a doubled 'REFTEST REFTEST ' prefix.
        result = s

        if s.startswith('TEST-PASS'):
            result = self.terminal.green(s[0:9]) + s[9:]
        elif s.startswith('TEST-UNEXPECTED'):
            result = self.terminal.red(s[0:20]) + s[20:]
        elif s.startswith('TEST-START'):
            result = self.terminal.yellow(s[0:10]) + s[10:]
        elif s.startswith('TEST-INFO'):
            result = self.terminal.yellow(s[0:9]) + s[9:]

        if reftest:
            result = 'REFTEST ' + result

        return result
class LoggingManager(object):
    """Holds and controls global logging state.

    An application should instantiate one of these and configure it as needed.

    This class provides a mechanism to configure the output of logging data
    both from mach and from the overall logging system (e.g. from other
    modules).
    """

    def __init__(self):
        self.start_time = time.time()

        self.json_handlers = []
        self.terminal_handler = None
        self.terminal_formatter = None

        self.root_logger = logging.getLogger()
        self.root_logger.setLevel(logging.DEBUG)

        # Installing NullHandler on the root logger ensures that *all* log
        # messages have at least one handler. This prevents Python from
        # complaining about "no handlers could be found for logger XXX."
        self.root_logger.addHandler(logging.NullHandler())

        self.mach_logger = logging.getLogger('mach')
        self.mach_logger.setLevel(logging.DEBUG)

        self.structured_filter = ConvertToStructuredFilter()

        # Loggers whose records are structured; handlers added below are
        # attached to every logger in this list.
        self.structured_loggers = [self.mach_logger]

        self._terminal = None

    @property
    def terminal(self):
        # Lazily construct a blessings Terminal; only kept when stdout is
        # actually a tty.
        if not self._terminal and blessings:
            # Sometimes blessings fails to set up the terminal. In that case,
            # silently fail.
            try:
                terminal = blessings.Terminal(stream=sys.stdout)

                if terminal.is_a_tty:
                    self._terminal = terminal
            except Exception:
                pass

        return self._terminal

    def add_json_handler(self, fh):
        """Enable JSON logging on the specified file object."""

        # Configure the consumer of structured messages.
        handler = logging.StreamHandler(stream=fh)
        handler.setFormatter(StructuredJSONFormatter())
        handler.setLevel(logging.DEBUG)

        # And hook it up.
        for logger in self.structured_loggers:
            logger.addHandler(handler)

        self.json_handlers.append(handler)

    def add_terminal_logging(self, fh=sys.stdout, level=logging.INFO,
            write_interval=False, write_times=True):
        """Enable logging to the terminal.

        Uses the colorizing terminal formatter when a usable terminal is
        available, otherwise the plain human-readable formatter.
        """

        formatter = StructuredHumanFormatter(self.start_time,
            write_interval=write_interval, write_times=write_times)

        if self.terminal:
            formatter = StructuredTerminalFormatter(self.start_time,
                write_interval=write_interval, write_times=write_times)
            formatter.set_terminal(self.terminal)

        handler = logging.StreamHandler(stream=fh)
        handler.setFormatter(formatter)
        handler.setLevel(level)

        for logger in self.structured_loggers:
            logger.addHandler(handler)

        self.terminal_handler = handler
        self.terminal_formatter = formatter

    def replace_terminal_handler(self, handler):
        """Replace the installed terminal handler.

        Returns the old handler or None if none was configured.
        If the new handler is None, removes any existing handler and disables
        logging to the terminal.
        """
        old = self.terminal_handler

        if old:
            for logger in self.structured_loggers:
                logger.removeHandler(old)

        if handler:
            for logger in self.structured_loggers:
                logger.addHandler(handler)

        self.terminal_handler = handler

        return old

    def enable_unstructured(self):
        """Enable logging of unstructured messages."""
        if self.terminal_handler:
            # The filter wraps classic records so the structured formatter
            # can handle them.
            self.terminal_handler.addFilter(self.structured_filter)
            self.root_logger.addHandler(self.terminal_handler)

    def disable_unstructured(self):
        """Disable logging of unstructured messages."""
        if self.terminal_handler:
            self.terminal_handler.removeFilter(self.structured_filter)
            self.root_logger.removeHandler(self.terminal_handler)

    def register_structured_logger(self, logger):
        """Register a structured logger.

        This needs to be called for all structured loggers that don't chain up
        to the mach logger in order for their output to be captured.
        """
        self.structured_loggers.append(logger)
mikeckennedy/python-for-entrepreneurs-course-demos | 12_user_accounts/final_12_blue_yellow_app/blue_yellow_app/controllers/home_controller.py | 10 | 1152 | import pyramid_handlers
from blue_yellow_app.controllers.base_controller import BaseController
from blue_yellow_app.infrastructure.supressor import suppress
class HomeController(BaseController):
    """Pyramid handler serving the public site pages."""

    # Tracks whether the next rendered row should use the alternate style.
    alternate_mode = False

    @pyramid_handlers.action(renderer='templates/home/index.pt')
    def index(self):
        """Render the landing page."""
        return {'value': 'HOME'}

    @pyramid_handlers.action(renderer='templates/home/about.pt')
    def about(self):
        """Render the about page."""
        return {'value': 'ABOUT'}

    @pyramid_handlers.action(renderer='templates/home/bookus.pt')
    def bookus(self):
        """Render the booking page."""
        return {}

    @pyramid_handlers.action(renderer='templates/home/contact.pt')
    def contact(self):
        """Render the contact page."""
        return {'value': 'CONTACT'}

    @pyramid_handlers.action(renderer='templates/home/image_credits.pt')
    def image_credits(self):
        """Render the image credits page."""
        return {}

    @suppress
    def dont_expose_as_web_action(self):
        """Not a web action; @suppress keeps it off the routing table."""
        print("Called dont_expose_as_web_action, what happened?")

    def alternate_row_style(self):
        """Return "alternate" on every other call, flipping the flag."""
        previous = self.alternate_mode
        self.alternate_mode = not previous
        return "alternate" if previous else ""
| mit |
abhikumar22/MYBLOG | blg/Lib/site-packages/pip/_vendor/requests/packages/chardet/langhungarianmodel.py | 2763 | 12536 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin2_HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 71, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,
175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 75,198,199,200,201,202,203,204,205,
79,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 81,222, 78,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 69, 63,239,240,241,
82, 14, 74,242, 70, 80,243, 72,244, 15, 83, 77, 84, 30, 76, 85,
245,246,247, 25, 73, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
win1250HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 72, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,
177,178,179,180, 78,181, 69,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 76,198,199,200,201,202,203,204,205,
81,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 83,222, 80,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 70, 63,239,240,241,
84, 14, 75,242, 71, 82,243, 73,244, 15, 85, 79, 86, 30, 77, 87,
245,246,247, 25, 74, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 94.7368%
# first 1024 sequences:5.2623%
# rest sequences: 0.8894%
# negative sequences: 0.0009%
HungarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,2,3,3,1,1,2,2,2,2,2,1,2,
3,2,2,3,3,3,3,3,2,3,3,3,3,3,3,1,2,3,3,3,3,2,3,3,1,1,3,3,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
3,2,1,3,3,3,3,3,2,3,3,3,3,3,1,1,2,3,3,3,3,3,3,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,2,3,3,3,1,3,3,3,3,3,1,3,3,2,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,3,3,2,3,3,2,2,3,2,3,2,0,3,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,3,3,3,1,2,3,2,2,3,1,2,3,3,2,2,0,3,3,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,3,2,3,3,3,3,2,3,3,3,3,0,2,3,2,
0,0,0,1,1,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,3,3,2,1,3,2,2,3,2,1,3,2,2,1,0,3,3,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,2,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,3,2,2,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,1,3,3,3,3,3,2,2,1,3,3,3,0,1,1,2,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,1,3,2,2,2,3,1,1,3,3,1,1,0,3,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,2,3,3,3,3,3,1,2,3,2,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,1,3,3,2,2,1,3,3,3,1,1,3,1,2,3,2,3,2,2,2,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,2,2,3,2,1,0,3,2,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,1,0,3,3,3,3,0,2,3,0,0,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,2,3,3,0,1,2,3,2,3,2,2,3,2,1,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,1,2,3,3,3,2,1,2,3,3,2,2,2,3,2,3,3,1,3,3,1,1,0,2,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,2,2,2,2,3,3,3,1,1,1,3,3,1,1,3,1,1,3,2,1,2,3,1,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,1,2,1,1,3,3,1,1,1,1,3,3,1,1,2,2,1,2,1,1,2,2,1,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,1,2,1,1,3,3,1,0,1,1,3,3,2,0,1,1,2,3,1,0,2,2,1,0,0,1,3,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,1,3,3,3,3,3,1,2,3,2,3,3,2,1,1,3,2,3,2,1,2,2,0,1,2,1,0,0,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,2,2,2,3,1,2,2,1,1,3,3,0,3,2,1,2,3,2,1,3,3,1,1,0,2,1,3,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,2,3,3,3,2,1,1,3,3,1,1,1,2,2,3,2,3,2,2,2,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,0,3,3,3,3,3,0,0,3,3,2,3,0,0,0,2,3,3,1,0,1,2,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,2,3,3,3,3,3,1,2,3,3,2,2,1,1,0,3,3,2,2,1,2,2,1,0,2,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,2,1,3,1,2,3,3,2,2,1,1,2,2,1,1,1,1,3,2,1,1,1,1,2,1,0,1,2,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
2,3,3,1,1,1,1,1,3,3,3,0,1,1,3,3,1,1,1,1,1,2,2,0,3,1,1,2,0,2,1,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,1,0,1,2,1,2,2,0,1,2,3,1,2,0,0,0,2,1,1,1,1,1,2,0,0,1,1,0,0,0,0,
1,2,1,2,2,2,1,2,1,2,0,2,0,2,2,1,1,2,1,1,2,1,1,1,0,1,0,0,0,1,1,0,
1,1,1,2,3,2,3,3,0,1,2,2,3,1,0,1,0,2,1,2,2,0,1,1,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,3,3,2,2,1,0,0,3,2,3,2,0,0,0,1,1,3,0,0,1,1,0,0,2,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,1,0,1,3,2,3,1,1,1,0,1,1,1,1,1,3,1,0,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,2,2,2,1,0,1,2,3,3,2,0,0,0,2,1,1,1,2,1,1,1,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,2,1,1,1,1,1,1,0,1,1,1,0,0,1,1,
3,2,2,1,0,0,1,1,2,2,0,3,0,1,2,1,1,0,0,1,1,1,0,1,1,1,1,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,1,1,1,1,1,2,1,1,1,2,3,1,1,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,3,3,1,0,0,1,2,2,1,0,0,0,0,2,0,0,1,1,1,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,1,0,1,1,0,1,1,1,0,1,2,1,1,0,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,2,2,0,0,0,0,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,1,0,
2,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
3,2,2,0,1,0,1,0,2,3,2,0,0,1,2,2,1,0,0,1,1,1,0,0,2,1,0,1,2,2,1,1,
2,1,1,1,1,1,1,2,1,1,1,1,1,1,0,2,1,0,1,1,0,1,1,1,0,1,1,2,1,1,0,1,
2,2,2,0,0,1,0,0,2,2,1,1,0,0,2,1,1,0,0,0,1,2,0,0,2,1,0,0,2,1,1,1,
2,1,1,1,1,2,1,2,1,1,1,2,2,1,1,2,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,
1,2,3,0,0,0,1,0,3,2,1,0,0,1,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,2,1,
1,1,0,0,0,1,0,1,1,1,1,1,2,0,0,1,0,0,0,2,0,0,1,1,1,1,1,1,1,1,0,1,
3,0,0,2,1,2,2,1,0,0,2,1,2,2,0,0,0,2,1,1,1,0,1,1,0,0,1,1,2,0,0,0,
1,2,1,2,2,1,1,2,1,2,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,0,0,1,
1,3,2,0,0,0,1,0,2,2,2,0,0,0,2,2,1,0,0,0,0,3,1,1,1,1,0,0,2,1,1,1,
2,1,0,1,1,1,0,1,1,1,1,1,1,1,0,2,1,0,0,1,0,1,1,0,1,1,1,1,1,1,0,1,
2,3,2,0,0,0,1,0,2,2,0,0,0,0,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,1,0,
2,1,1,1,1,2,1,2,1,2,0,1,1,1,0,2,1,1,1,2,1,1,1,1,0,1,1,1,1,1,0,1,
3,1,1,2,2,2,3,2,1,1,2,2,1,1,0,1,0,2,2,1,1,1,1,1,0,0,1,1,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,0,0,0,0,0,2,2,0,0,0,0,2,2,1,0,0,0,1,1,0,0,1,2,0,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,1,1,0,1,2,1,1,1,0,1,
1,0,0,1,2,3,2,1,0,0,2,0,1,1,0,0,0,1,1,1,1,0,1,1,0,0,1,0,0,0,0,0,
1,2,1,2,1,2,1,1,1,2,0,2,1,1,1,0,1,2,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,3,2,0,0,0,0,0,1,1,2,1,0,0,1,1,1,0,0,0,0,2,0,0,1,1,0,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,0,1,1,1,1,0,2,1,1,1,1,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,1,1,1,0,2,2,2,0,0,0,3,2,1,0,0,0,1,1,0,0,1,1,0,1,1,1,0,0,
1,1,0,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,0,0,1,1,1,0,1,0,1,
2,1,0,2,1,1,2,2,1,1,2,1,1,1,0,0,0,1,1,0,1,1,1,1,0,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,1,0,
1,2,3,0,0,0,1,0,2,2,0,0,0,0,2,2,0,0,0,0,0,1,0,0,1,0,0,0,2,0,1,0,
2,1,1,1,1,1,0,2,0,0,0,1,2,1,1,1,1,0,1,2,0,1,0,1,0,1,1,1,0,1,0,1,
2,2,2,0,0,0,1,0,2,1,2,0,0,0,1,1,2,0,0,0,0,1,0,0,1,1,0,0,2,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,1,0,2,2,2,0,0,0,1,1,0,0,0,0,0,1,1,0,2,0,0,1,1,1,0,1,
1,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,0,0,1,1,0,1,0,1,1,1,1,1,0,0,0,1,
1,0,0,1,0,1,2,1,0,0,1,1,1,2,0,0,0,1,1,0,1,0,1,1,0,0,1,0,0,0,0,0,
0,2,1,2,1,1,1,1,1,2,0,2,0,1,1,0,1,2,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,0,1,2,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,2,1,0,1,
2,2,1,1,1,1,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,0,0,1,1,0,0,0,0,2,1,0,0,0,0,0,2,0,0,2,2,0,0,2,0,0,1,
2,1,1,1,1,1,1,1,0,1,1,0,1,1,0,1,0,0,0,1,1,1,1,0,0,1,1,1,1,0,0,1,
1,1,2,0,0,3,1,0,2,1,1,1,0,0,1,1,1,0,0,0,1,1,0,0,0,1,0,0,1,0,1,0,
1,2,1,0,1,1,1,2,1,1,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,1,0,0,0,1,0,0,
2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,2,0,0,0,
2,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,1,0,1,
2,1,1,1,2,1,1,1,0,1,1,2,1,0,0,0,0,1,1,1,1,0,1,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,1,1,1,1,0,0,1,1,2,1,0,0,0,1,1,0,0,0,1,1,0,0,1,0,1,0,0,0,
1,2,1,1,1,1,1,1,1,1,0,1,0,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,0,0,0,1,1,1,1,0,0,1,1,0,0,0,0,0,1,1,1,2,0,0,1,0,0,1,0,1,0,0,0,
0,1,1,1,1,1,1,1,1,2,0,1,1,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,0,0,2,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,1,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,0,1,0,0,1,1,0,1,0,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,0,1,0,0,1,0,1,0,1,1,1,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,0,0,0,1,1,1,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
)
# Model description consumed by chardet's single-byte charset prober:
# the char-to-order map, the bigram precedence matrix, and tuning stats
# (ratios quoted in the "Model Table" comment above).
Latin2HungarianModel = {
    'charToOrderMap': Latin2_HungarianCharToOrderMap,
    'precedenceMatrix': HungarianLangModel,
    'mTypicalPositiveRatio': 0.947368,  # share of the first 512 sequences
    'keepEnglishLetter': True,
    'charsetName': "ISO-8859-2"
}
# Same language model as above, paired with the windows-1250 byte-order
# map instead of the ISO-8859-2 one.
Win1250HungarianModel = {
    'charToOrderMap': win1250HungarianCharToOrderMap,
    'precedenceMatrix': HungarianLangModel,
    'mTypicalPositiveRatio': 0.947368,  # share of the first 512 sequences
    'keepEnglishLetter': True,
    'charsetName': "windows-1250"
}
# flake8: noqa
| gpl-3.0 |
wdwvt1/qiime | qiime/align_seqs.py | 15 | 11369 | #!/usr/bin/env python
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = [
"Rob Knight",
"Greg Caporaso",
"Jeremy Widmann",
"Kyle Bittinger"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
"""Contains code for aligning sequences, using several techniques.
This module has the responsibility for taking a set of sequences and
returning an alignment. Mostly, it will be thin wrappers for code
already in cogent.app.*, to which wrappers for e.g. PyNAST need to be
added..
"""
import warnings
warnings.filterwarnings('ignore', 'Not using MPI as mpi4py not found')
from os import remove
from numpy import median
import bfillings
from bfillings.infernal import cmalign_from_alignment
import bfillings.clustalw
import bfillings.muscle_v38
import bfillings.mafft
from cogent.parse.rfam import MinimalRfamParser, ChangedSequence
from burrito.util import ApplicationNotFoundError
from skbio.io import RecordError
from skbio.parse.sequences import parse_fasta
from qiime.util import (FunctionWithParams,
get_qiime_temp_dir)
from skbio.alignment import SequenceCollection, Alignment
from skbio.sequence import DNASequence
from skbio.parse.sequences import parse_fasta
# Load PyNAST if it's available. If it's not, skip it if not but set up
# to raise errors if the user tries to use it.
try:
from pynast.util import pynast_seqs, pairwise_alignment_methods
from pynast.logger import NastLogger
except ImportError:
def raise_pynast_not_found_error(*args, **kwargs):
raise ApplicationNotFoundError("PyNAST cannot be found.\nIs PyNAST installed? Is it in your $PYTHONPATH?" +
"\nYou can obtain PyNAST from http://qiime.org/pynast/.")
# set functions which cannot be imported to raise_pynast_not_found_error
pynast_seqs = NastLogger = raise_pynast_not_found_error
pairwise_alignment_methods = {}
class Aligner(FunctionWithParams):
    """An Aligner takes an unaligned set of sequences and returns an alignment.

    This is an abstract class: subclasses should implement the __call__
    method.

    Note: sequence ids should be preserved during this process, i.e. the
    description lines should be saved/restored if the alignment app is
    destructive to them.
    """
    # Human-readable identifier used in logs/result provenance.
    Name = 'Aligner'

    def __init__(self, params):
        """Return new Aligner object with specified params.

        Note: expect params to contain both generic and per-method (e.g. for
        infernal vs. PyNAST vs. whatever) params, so leaving it as a dict
        rather than setting attributes. Some standard entries in params are:

        Application: 3rd-party application used, if any, e.g. infernal
        [can't actually think of any other params that apply to all of
        e.g. PyNAST, infernal, and muscle]
        """
        self.Params = params

    def __call__(self, seq_path, result_path=None, log_path=None):
        """Returns alignment from sequences.

        Parameters:
        seq_path: path to file of sequences
        result_path: path to file of results. If specified, should
        dump the result to the desired path as fasta, otherwise should
        return skbio.core.alignment.Alignment object.
        log_path: path to log, which should include dump of params.
        """
        raise NotImplementedError("Aligner is an abstract class")
class CogentAligner(Aligner):
    """Generic aligner using Cogent multiple alignment methods."""
    Name = 'CogentAligner'

    def getResult(self, seq_path):
        """Returns alignment from sequences.

        By convention, app parameters begin with a '-'. Key-value
        pairs in self.Params following this convention will be passed
        as parameters to the module's alignment function.
        """
        # 'Module' is expected to be one of the bfillings wrapper modules
        # (muscle/clustalw/mafft); see alignment_module_names below.
        module = self.Params['Module']
        seqs = self.getData(seq_path)
        # Forward only the dash-prefixed entries as app parameters.
        params = dict(
            [(k, v) for (k, v) in self.Params.items() if k.startswith('-')])
        result = module.align_unaligned_seqs(seqs, params=params)
        return result

    def __call__(self, result_path=None, log_path=None, *args, **kwargs):
        """Calls superclass method to align seqs"""
        return FunctionWithParams.__call__(self, result_path=result_path,
                                           log_path=log_path, *args, **kwargs)
class InfernalAligner(Aligner):
    """Aligner that uses Infernal's cmalign against a covariance model
    built from a Stockholm template alignment with secondary structure."""
    Name = 'InfernalAligner'

    def __init__(self, params):
        """Return new InfernalAligner object with specified params.
        """
        _params = {
            'Application': 'Infernal',
        }
        _params.update(params)
        Aligner.__init__(self, _params)

    def __call__(self, seq_path, result_path=None, log_path=None,
                 failure_path=None, cmbuild_params=None, cmalign_params=None):
        """Align sequences in seq_path against the template model.

        Writes fasta to result_path (returning None) when given, otherwise
        returns the Alignment object. failure_path is accepted for
        interface symmetry with PyNastAligner but is not used here.
        """
        log_params = []
        # load candidate sequences
        candidate_sequences = dict(parse_fasta(open(seq_path, 'U')))

        # load template sequences
        try:
            info, template_alignment, struct = list(MinimalRfamParser(open(
                self.Params['template_filepath'], 'U'),
                seq_constructor=ChangedSequence))[0]
        except RecordError:
            raise ValueError(
                "Template alignment must be in Stockholm format with corresponding secondary structure annotation when using InfernalAligner.")

        # Need to make separate mapping for unaligned sequences
        # (int_map gives cmalign-safe ids; new_to_old_ids restores them).
        unaligned = SequenceCollection.from_fasta_records(
            candidate_sequences.iteritems(), DNASequence)
        mapped_seqs, new_to_old_ids = unaligned.int_map(prefix='unaligned_')
        mapped_seq_tuples = [(k, str(v)) for k, v in mapped_seqs.iteritems()]

        # Turn on --gapthresh option in cmbuild to force alignment to full
        # model
        if cmbuild_params is None:
            cmbuild_params = {}
        cmbuild_params.update({'--gapthresh': 1.0})

        # record cmbuild parameters
        log_params.append('cmbuild parameters:')
        log_params.append(str(cmbuild_params))

        # Turn on --sub option in Infernal, since we know the unaligned sequences
        # are fragments.
        # Also turn on --gapthresh to use same gapthresh as was used to build
        # model
        if cmalign_params is None:
            cmalign_params = {}
        cmalign_params.update({'--sub': True, '--gapthresh': 1.0})

        # record cmalign parameters
        log_params.append('cmalign parameters:')
        log_params.append(str(cmalign_params))

        # Align sequences to alignment including alignment gaps.
        aligned, struct_string = cmalign_from_alignment(
            aln=template_alignment,
            structure_string=struct,
            seqs=mapped_seq_tuples,
            include_aln=True,
            params=cmalign_params,
            cmbuild_params=cmbuild_params)

        # Pull out original sequences from full alignment.
        infernal_aligned = []
        # Get a dict of the ids to sequences (note that this is a
        # cogent alignment object, hence the call to NamedSeqs)
        aligned_dict = aligned.NamedSeqs
        for n, o in new_to_old_ids.iteritems():
            aligned_seq = aligned_dict[n]
            infernal_aligned.append((o, aligned_seq))

        # Create an Alignment object from alignment dict
        infernal_aligned = Alignment.from_fasta_records(
            infernal_aligned, DNASequence)

        if log_path is not None:
            log_file = open(log_path, 'w')
            log_file.write('\n'.join(log_params))
            log_file.close()

        if result_path is not None:
            result_file = open(result_path, 'w')
            result_file.write(infernal_aligned.to_fasta())
            result_file.close()
            return None
        else:
            # NOTE(review): this try/except appears unreachable -- returning
            # an already-constructed object cannot raise ValueError.
            try:
                return infernal_aligned
            except ValueError:
                return {}
class PyNastAligner(Aligner):
    """Aligner that delegates to PyNAST (NAST algorithm) against a
    pre-aligned template, separating aligned from failed sequences."""
    Name = 'PyNastAligner'

    def __init__(self, params):
        """Return new PyNastAligner object with specified params.
        """
        _params = {
            'min_pct': 75.0,
            'min_len': 150,
            'blast_db': None,
            'template_filepath': None,
            'pairwise_alignment_method': 'blast',
            'Application': 'PyNAST',
            'Algorithm': 'NAST',
        }
        _params.update(params)
        Aligner.__init__(self, _params)

    def __call__(self, seq_path, result_path=None, log_path=None,
                 failure_path=None):
        """Align seq_path with PyNAST; optionally write results/failures.

        Returns the Alignment object when result_path is None, otherwise
        writes fasta to result_path and returns None.
        """
        # load candidate sequences
        seq_file = open(seq_path, 'U')
        candidate_sequences = parse_fasta(seq_file)

        # load template sequences
        template_alignment = []
        template_alignment_fp = self.Params['template_filepath']
        for seq_id, seq in parse_fasta(open(template_alignment_fp)):
            # replace '.' characters with '-' characters
            template_alignment.append((seq_id, seq.replace('.', '-').upper()))
        template_alignment = Alignment.from_fasta_records(
            template_alignment, DNASequence, validate=True)

        # initialize_logger
        logger = NastLogger(log_path)

        # get function for pairwise alignment method
        pairwise_alignment_f = pairwise_alignment_methods[
            self.Params['pairwise_alignment_method']]

        pynast_aligned, pynast_failed = pynast_seqs(
            candidate_sequences,
            template_alignment,
            min_pct=self.Params['min_pct'],
            min_len=self.Params['min_len'],
            align_unaligned_seqs_f=pairwise_alignment_f,
            logger=logger,
            temp_dir=get_qiime_temp_dir())

        logger.record(str(self))

        # Convert cogent sequence objects back to skbio types in place.
        for i, seq in enumerate(pynast_failed):
            skb_seq = DNASequence(str(seq), id=seq.Name)
            pynast_failed[i] = skb_seq
        pynast_failed = SequenceCollection(pynast_failed)

        for i, seq in enumerate(pynast_aligned):
            skb_seq = DNASequence(str(seq), id=seq.Name)
            pynast_aligned[i] = skb_seq
        pynast_aligned = Alignment(pynast_aligned)

        if failure_path is not None:
            fail_file = open(failure_path, 'w')
            fail_file.write(pynast_failed.to_fasta())
            fail_file.close()

        if result_path is not None:
            result_file = open(result_path, 'w')
            result_file.write(pynast_aligned.to_fasta())
            result_file.close()
            return None
        else:
            return pynast_aligned
def compute_min_alignment_length(seqs_f, fraction=0.75):
    """Return ``fraction`` of the median sequence length, as an int.

    NOTE(review): the original docstring said "n standard deviations below
    the mean", but the implementation actually scales the *median* length
    by ``fraction`` -- the docstring was wrong, not the code.
    """
    med_length = median([len(s) for _, s in parse_fasta(seqs_f)])
    return int(med_length * fraction)
# Dispatch tables for the user-facing alignment-method choice: methods
# implemented as Aligner subclasses in this module vs. methods delegated
# to a bfillings wrapper module (consumed by CogentAligner via 'Module').
alignment_method_constructors = {'pynast': PyNastAligner,
                                 'infernal': InfernalAligner}

alignment_module_names = {
    'muscle': bfillings.muscle_v38,
    'clustalw': bfillings.clustalw,
    'mafft': bfillings.mafft,
    'infernal': bfillings.infernal,
}
| gpl-2.0 |
alephu5/Soundbyte | statistics/linear_complexity.py | 1 | 2280 | #! ../environment/bin/python3.3
#This test calculates the length of the smallest linear-feedback
#register to estimate complextiy, and decides whether
#the sequence is likely to be random. This is based on the idea
#that a random sequence is unlikely to have a short linear-feeback
#register.
from scipy.stats import chi2
from numpy import sum
from bitdata import partition_bits
def shortest_reg(bits):
    """Berlekamp-Massey algorithm for binary sequences.

    Returns the length of the shortest linear-feedback shift register
    (LFSR) capable of generating *bits* (a sequence of 0s and 1s).
    """
    length = len(bits)
    prev_poly = [1] + [0] * (length - 1)  # B(x): last polynomial before a length change
    poly = list(prev_poly)                # C(x): current connection polynomial
    lfsr_len = 0                          # L: current register length
    last_change = -1                      # m: position of the last length change
    for pos in range(length):
        # Discrepancy between the register's prediction and the actual bit.
        discrepancy = 0
        for j in range(lfsr_len + 1):
            discrepancy += poly[j] * bits[pos - j]
        if discrepancy % 2:
            snapshot = list(poly)
            shift = pos - last_change
            # C(x) <- C(x) + x^shift * B(x) over GF(2).
            poly[shift:] = [poly[j] != prev_poly[j - shift]
                            for j in range(shift, length)]
            if lfsr_len <= pos / 2:
                lfsr_len = pos + 1 - lfsr_len
                last_change = pos
                prev_poly = snapshot
    return lfsr_len
def theor_mean(bksz):
    """Expected linear complexity of a random block of length *bksz*.

    Implements the mean mu from the NIST SP 800-22 linear complexity
    test, assuming the sequence is random.
    """
    parity_term = (9 + (-1) ** (bksz + 1)) / 36
    decay_term = (bksz / 3 + 2 / 9) / 2 ** bksz
    return bksz / 2 + parity_term - decay_term
def tabulate(blocks):
    """Used in computing the test statistic. Assumes that all blocks are of the same size,
    if they are not it will produce a meaningless result without raising an error."""
    # Histogram of the seven T-value classes from NIST SP 800-22's
    # linear complexity test (bins 0..6).
    tab = {i: 0 for i in range(7)}
    bksz = len(blocks[0])
    mean = theor_mean(bksz)
    for block in blocks:
        # Linear complexity of this block via Berlekamp-Massey.
        bkm = shortest_reg(block)
        # T_i = (-1)^M * (L_i - mu) + 2/9, per the NIST specification.
        T = (-1)**bksz * (bkm - mean) + 2/9
        if T <= -2.5:
            tab[0] += 1
        elif T > 2.5:
            tab[6] += 1
        else:
            # Interior bins: (-2.5,-1.5], (-1.5,-0.5], ..., (1.5,2.5].
            for i in range(1, 6):
                if (i - 3.5) < T <= (i - 2.5):
                    tab[i] += 1
                    break
    return tab
def theor_prob():
    """Theoretical class probabilities pi_0..pi_6 for the seven T-value
    bins under randomness. These are given in the NIST manual."""
    probabilities = [0.010417, 0.03125, 0.125, 0.5,
                     0.25, 0.0625, 0.020833]
    return probabilities
def linear_complexity(block, bksz=1000):
    """P-value of the NIST SP 800-22 linear complexity test.

    Splits *block* into sub-blocks of size *bksz*, tabulates the
    linear-complexity deviation classes, and compares them against the
    theoretical distribution with a chi-square test (6 degrees of
    freedom). Returns the p-value.
    """
    blocks = partition_bits(block, bksz)
    # N in NIST SP 800-22: the *number* of blocks. The previous code used
    # the block size (M) in the chi-square expression, which skews the
    # statistic whenever len(blocks) != bksz.
    nblocks = len(blocks)
    prob = theor_prob()
    tab = tabulate(blocks)
    # chi^2 = sum_i (nu_i - N*pi_i)^2 / (N*pi_i)
    chisq = sum(((tab[i] - nblocks * prob[i]) ** 2) / (nblocks * prob[i])
                for i in range(7))
    return 1 - chi2.cdf(chisq, 6)
def israndom(pvalue):
    """Return True when *pvalue* clears the 1% significance threshold."""
    return pvalue >= 0.01
| gpl-3.0 |
petrutlucian94/nova_dev | nova/wsgi.py | 2 | 17814 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for working with WSGI servers."""
from __future__ import print_function
import os.path
import socket
import sys
import eventlet
import eventlet.wsgi
import greenlet
from oslo.config import cfg
from paste import deploy
import routes.middleware
import ssl
import webob.dec
import webob.exc
from nova import exception
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
wsgi_opts = [
cfg.StrOpt('api_paste_config',
default="api-paste.ini",
help='File name for the paste.deploy config for nova-api'),
cfg.StrOpt('wsgi_log_format',
default='%(client_ip)s "%(request_line)s" status: %(status_code)s'
' len: %(body_length)s time: %(wall_seconds).7f',
help='A python format string that is used as the template to '
'generate log lines. The following values can be formatted '
'into it: client_ip, date_time, request_line, status_code, '
'body_length, wall_seconds.'),
cfg.StrOpt('ssl_ca_file',
help="CA certificate file to use to verify "
"connecting clients"),
cfg.StrOpt('ssl_cert_file',
help="SSL certificate of API server"),
cfg.StrOpt('ssl_key_file',
help="SSL private key of API server"),
cfg.IntOpt('tcp_keepidle',
default=600,
help="Sets the value of TCP_KEEPIDLE in seconds for each "
"server socket. Not supported on OS X."),
cfg.IntOpt('wsgi_default_pool_size',
default=1000,
help="Size of the pool of greenthreads used by wsgi"),
cfg.IntOpt('max_header_line',
default=16384,
help="Maximum line size of message headers to be accepted. "
"max_header_line may need to be increased when using "
"large tokens (typically those generated by the "
"Keystone v3 API with big service catalogs)."),
]
CONF = cfg.CONF
CONF.register_opts(wsgi_opts)
LOG = logging.getLogger(__name__)
class Server(object):
    """Server class to manage a WSGI server, serving a WSGI application."""

    default_pool_size = CONF.wsgi_default_pool_size

    def __init__(self, name, app, host='0.0.0.0', port=0, pool_size=None,
                 protocol=eventlet.wsgi.HttpProtocol, backlog=128,
                 use_ssl=False, max_url_len=None):
        """Initialize, but do not start, a WSGI server.

        :param name: Pretty name for logging.
        :param app: The WSGI application to serve.
        :param host: IP address to serve the application.
        :param port: Port number to server the application.
        :param pool_size: Maximum number of eventlets to spawn concurrently.
        :param backlog: Maximum number of queued connections.
        :param max_url_len: Maximum length of permitted URLs.
        :returns: None
        :raises: nova.exception.InvalidInput
        """
        # Allow operators to customize http requests max header line size.
        eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
        self.name = name
        self.app = app
        self._server = None
        self._protocol = protocol
        self._pool = eventlet.GreenPool(pool_size or self.default_pool_size)
        self._logger = logging.getLogger("nova.%s.wsgi.server" % self.name)
        self._wsgi_logger = logging.WritableLogger(self._logger)
        self._use_ssl = use_ssl
        self._max_url_len = max_url_len

        # NOTE(review): message says "more than 1" but the check rejects
        # only backlog < 1, so backlog == 1 is accepted -- confirm intent.
        if backlog < 1:
            raise exception.InvalidInput(
                reason='The backlog must be more than 1')

        bind_addr = (host, port)
        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix
        try:
            info = socket.getaddrinfo(bind_addr[0],
                                      bind_addr[1],
                                      socket.AF_UNSPEC,
                                      socket.SOCK_STREAM)[0]
            family = info[0]
            bind_addr = info[-1]
        except Exception:
            # Fall back to IPv4 if address resolution fails.
            family = socket.AF_INET

        try:
            self._socket = eventlet.listen(bind_addr, family, backlog=backlog)
        except EnvironmentError:
            LOG.error(_("Could not bind to %(host)s:%(port)s"),
                      {'host': host, 'port': port})
            raise

        # Record the actual bound address (port may have been 0 = ephemeral).
        (self.host, self.port) = self._socket.getsockname()[0:2]
        LOG.info(_("%(name)s listening on %(host)s:%(port)s") % self.__dict__)

    def start(self):
        """Start serving a WSGI application.

        :returns: None
        """
        if self._use_ssl:
            try:
                ca_file = CONF.ssl_ca_file
                cert_file = CONF.ssl_cert_file
                key_file = CONF.ssl_key_file

                if cert_file and not os.path.exists(cert_file):
                    raise RuntimeError(
                        _("Unable to find cert_file : %s") % cert_file)

                if ca_file and not os.path.exists(ca_file):
                    raise RuntimeError(
                        _("Unable to find ca_file : %s") % ca_file)

                if key_file and not os.path.exists(key_file):
                    raise RuntimeError(
                        _("Unable to find key_file : %s") % key_file)

                # NOTE(review): the `self._use_ssl and` below is redundant --
                # we are already inside `if self._use_ssl:`.
                if self._use_ssl and (not cert_file or not key_file):
                    raise RuntimeError(
                        _("When running server in SSL mode, you must "
                          "specify both a cert_file and key_file "
                          "option value in your configuration file"))

                ssl_kwargs = {
                    'server_side': True,
                    'certfile': cert_file,
                    'keyfile': key_file,
                    'cert_reqs': ssl.CERT_NONE,
                }

                # Require client certificates only when a CA is configured.
                if CONF.ssl_ca_file:
                    ssl_kwargs['ca_certs'] = ca_file
                    ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED

                self._socket = eventlet.wrap_ssl(self._socket,
                                                 **ssl_kwargs)

                self._socket.setsockopt(socket.SOL_SOCKET,
                                        socket.SO_REUSEADDR, 1)
                # sockets can hang around forever without keepalive
                self._socket.setsockopt(socket.SOL_SOCKET,
                                        socket.SO_KEEPALIVE, 1)

                # This option isn't available in the OS X version of eventlet
                if hasattr(socket, 'TCP_KEEPIDLE'):
                    self._socket.setsockopt(socket.IPPROTO_TCP,
                                            socket.TCP_KEEPIDLE,
                                            CONF.tcp_keepidle)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Failed to start %(name)s on %(host)s"
                                ":%(port)s with SSL support") % self.__dict__)

        wsgi_kwargs = {
            'func': eventlet.wsgi.server,
            'sock': self._socket,
            'site': self.app,
            'protocol': self._protocol,
            'custom_pool': self._pool,
            'log': self._wsgi_logger,
            'log_format': CONF.wsgi_log_format,
            'debug': False
        }

        if self._max_url_len:
            wsgi_kwargs['url_length_limit'] = self._max_url_len

        # Run the WSGI server in its own green thread.
        self._server = eventlet.spawn(**wsgi_kwargs)

    def stop(self):
        """Stop this server.

        This is not a very nice action, as currently the method by which a
        server is stopped is by killing its eventlet.

        :returns: None
        """
        LOG.info(_("Stopping WSGI server."))

        if self._server is not None:
            # Resize pool to stop new requests from being processed
            self._pool.resize(0)
            self._server.kill()

    def wait(self):
        """Block, until the server has stopped.

        Waits on the server's eventlet to finish, then returns.

        :returns: None
        """
        try:
            if self._server is not None:
                self._server.wait()
        except greenlet.GreenletExit:
            # kill() in stop() raises GreenletExit in the server thread;
            # treat it as a normal shutdown.
            LOG.info(_("WSGI server has stopped."))
class Request(webob.Request):
    """Nova's request class; a hook point for extending webob.Request."""
    pass
class Application(object):
    """Base WSGI application wrapper. Subclasses need to implement __call__."""

    @classmethod
    def factory(cls, global_config, **local_config):
        """Used for paste app factories in paste.deploy config files.

        Any local configuration (that is, values under the [app:APPNAME]
        section of the paste config) will be passed into the `__init__` method
        as kwargs.

        A hypothetical configuration would look like:

            [app:wadl]
            latest_version = 1.3
            paste.app_factory = nova.api.fancy_api:Wadl.factory

        which would result in a call to the `Wadl` class as

            import nova.api.fancy_api
            fancy_api.Wadl(latest_version='1.3')

        You could of course re-implement the `factory` method in subclasses,
        but using the kwarg passing it shouldn't be necessary.
        """
        # global_config is accepted (paste passes it) but unused here.
        return cls(**local_config)

    def __call__(self, environ, start_response):
        r"""Subclasses will probably want to implement __call__ like this:

        @webob.dec.wsgify(RequestClass=Request)
        def __call__(self, req):
            # Any of the following objects work as responses:

            # Option 1: simple string
            res = 'message\n'

            # Option 2: a nicely formatted HTTP exception page
            res = exc.HTTPForbidden(detail='Nice try')

            # Option 3: a webob Response object (in case you need to play with
            # headers, or you want to be treated like an iterable, or or or)
            res = Response();
            res.app_iter = open('somefile')

            # Option 4: any wsgi app to be run next
            res = self.application

            # Option 5: you can get a Response object for a wsgi app, too, to
            # play with headers etc
            res = req.get_response(self.application)

            # You can then just return your response...
            return res
            # ... or set req.response and return None.
            req.response = res

        See the end of http://pythonpaste.org/webob/modules/dec.html
        for more info.
        """
        raise NotImplementedError(_('You must implement __call__'))
class Middleware(Application):
    """Base WSGI middleware wrapper.

    A middleware wraps another WSGI application. By default every request
    is simply forwarded to the wrapped app; subclasses can override
    process_request/process_response (or __call__ itself) to short-circuit
    or post-process traffic.
    """

    @classmethod
    def factory(cls, global_config, **local_config):
        """Used for paste filter factories in paste.deploy config files.

        Any local configuration (that is, values under the [filter:APPNAME]
        section of the paste config) will be passed into the `__init__`
        method as kwargs.

        A hypothetical configuration would look like:

            [filter:analytics]
            redis_host = 127.0.0.1
            paste.filter_factory = nova.api.analytics:Analytics.factory

        which would result in a call to the `Analytics` class as

            import nova.api.analytics
            analytics.Analytics(app_from_paste, redis_host='127.0.0.1')

        You could of course re-implement the `factory` method in subclasses,
        but using the kwarg passing it shouldn't be necessary.
        """
        def _factory(app):
            return cls(app, **local_config)
        return _factory

    def __init__(self, application):
        self.application = application

    def process_request(self, req):
        """Hook called on each request.

        Return None to let the next application down the stack handle the
        request, or return a response to stop execution here.
        """
        return None

    def process_response(self, response):
        """Hook for post-processing the wrapped application's response."""
        return response

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        # Give the request hook a chance to answer directly; otherwise
        # delegate to the wrapped app and post-process its response.
        early = self.process_request(req)
        if early:
            return early
        return self.process_response(req.get_response(self.application))
class Debug(Middleware):
    """Helper class for debugging a WSGI application.
    Can be inserted into any WSGI application chain to get information
    about the request and response.
    """
    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        # Dump the full WSGI environ before handing the request downstream.
        print(('*' * 40) + ' REQUEST ENVIRON')
        for key, value in req.environ.items():
            print(key, '=', value)
        print()
        resp = req.get_response(self.application)
        print(('*' * 40) + ' RESPONSE HEADERS')
        # NOTE(review): iteritems() is Python 2 only while environ above uses
        # .items() -- confirm this module is not expected to run on Python 3.
        for (key, value) in resp.headers.iteritems():
            print(key, '=', value)
        print()
        # Wrap the body iterator so the payload is echoed as it streams out.
        resp.app_iter = self.print_generator(resp.app_iter)
        return resp
    @staticmethod
    def print_generator(app_iter):
        """Iterator that prints the contents of a wrapper string."""
        print(('*' * 40) + ' BODY')
        for part in app_iter:
            # Write raw chunks and flush immediately so output interleaves
            # correctly with the header dumps above.
            sys.stdout.write(part)
            sys.stdout.flush()
            yield part
        print()
class Router(object):
    """WSGI middleware that maps incoming requests to WSGI apps."""
    def __init__(self, mapper):
        """Create a router for the given routes.Mapper.
        Each route in `mapper` must specify a 'controller', which is a
        WSGI app to call. You'll probably want to specify an 'action' as
        well and have your controller be an object that can route
        the request to the action-specific method.
        Examples:
          mapper = routes.Mapper()
          sc = ServerController()
          # Explicit mapping of one route to a controller+action
          mapper.connect(None, '/svrlist', controller=sc, action='list')
          # Actions are all implicitly defined
          mapper.resource('server', 'servers', controller=sc)
          # Pointing to an arbitrary WSGI app. You can specify the
          # {path_info:.*} parameter so the target app can be handed just that
          # section of the URL.
          mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp())
        """
        self.map = mapper
        # RoutesMiddleware matches each request against self.map, stores the
        # result in environ['wsgiorg.routing_args'], then calls _dispatch.
        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
                                                          self.map)
    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        """Route the incoming request to a controller based on self.map.
        If no match, return a 404.
        """
        # Returning a WSGI app from a wsgify-decorated method makes webob
        # run the request through it -- i.e. through RoutesMiddleware.
        return self._router
    @staticmethod
    @webob.dec.wsgify(RequestClass=Request)
    def _dispatch(req):
        """Dispatch the request to the appropriate controller.
        Called by self._router after matching the incoming request to a route
        and putting the information into req.environ. Either returns 404
        or the routed WSGI app's response.
        """
        match = req.environ['wsgiorg.routing_args'][1]
        if not match:
            return webob.exc.HTTPNotFound()
        app = match['controller']
        return app
class Loader(object):
    """Used to load WSGI applications from paste configurations."""
    def __init__(self, config_path=None):
        """Initialize the loader, and attempt to find the config.
        :param config_path: Full or relative path to the paste config.
        :returns: None
        :raises: ConfigNotFound when no usable config file can be located.
        """
        self.config_path = None
        config_path = config_path or CONF.api_paste_config
        # Relative paths are resolved through oslo.config's search dirs;
        # absolute paths are used directly, but only when they exist.
        if not os.path.isabs(config_path):
            self.config_path = CONF.find_file(config_path)
        elif os.path.exists(config_path):
            self.config_path = config_path
        if not self.config_path:
            raise exception.ConfigNotFound(path=config_path)
    def load_app(self, name):
        """Return the paste URLMap wrapped WSGI application.
        :param name: Name of the application to load.
        :returns: Paste URLMap object wrapping the requested application.
        :raises: `nova.exception.PasteAppNotFound`
        """
        try:
            LOG.debug(_("Loading app %(name)s from %(path)s") %
                      {'name': name, 'path': self.config_path})
            return deploy.loadapp("config:%s" % self.config_path, name=name)
        except LookupError as err:
            # deploy.loadapp raises LookupError when 'name' is not defined
            # in the paste config; surface it as a project exception.
            LOG.error(err)
            raise exception.PasteAppNotFound(name=name, path=self.config_path)
| apache-2.0 |
anrl/gini | backend/src/gloader/xml/xpath/ParsedStep.py | 10 | 3414 | ########################################################################
#
# File Name: ParsedStep.py
#
#
"""
A Parsed token that represents a step on the result tree.
WWW: http://4suite.org/XPATH e-mail: support@4suite.org
Copyright (c) 2000-2001 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.org/COPYRIGHT for license and copyright information
"""
from xml.dom import Node
from xml.xpath import Util
from xml.xpath import NamespaceNode
import sys
class ParsedStep:
    """One XPath location step: axis '::' node-test, optionally followed
    by a predicate list."""
    def __init__(self, axis, nodeTest, predicates=None):
        self._axis = axis
        self._nodeTest = nodeTest
        self._predicates = predicates
        return
    def evaluate(self, context):
        """
        Select a set of nodes from the axis, then filter through the node
        test and the predicates.
        """
        # The axis also reports whether its natural order is reversed;
        # positional predicates need that flag to count correctly.
        (node_set, reverse) = self._axis.select(context, self._nodeTest.match)
        if self._predicates and len(node_set):
            node_set = self._predicates.filter(node_set, context, reverse)
        return node_set
    select = evaluate
    def pprint(self, indent=''):
        print indent + str(self)
        self._axis.pprint(indent + ' ')
        self._nodeTest.pprint(indent + ' ')
        self._predicates and self._predicates.pprint(indent + ' ')
    def __str__(self):
        return '<Step at %x: %s>' % (id(self), repr(self))
    def __repr__(self):
        result = repr(self._axis) + '::' + repr(self._nodeTest)
        if self._predicates:
            result = result + repr(self._predicates)
        return result
class ParsedAbbreviatedStep:
    """Abbreviated XPath step: '..' (parent node) when parent is true,
    '.' (context node) otherwise."""
    def __init__(self, parent):
        # Truthy parent selects the '..' behavior in evaluate/__repr__.
        self.parent = parent
    def evaluate(self, context):
        if self.parent:
            # Attribute nodes hang off ownerElement rather than parentNode.
            if context.node.nodeType == Node.ATTRIBUTE_NODE:
                return [context.node.ownerElement]
            return context.node.parentNode and [context.node.parentNode] or []
        return [context.node]
    select = evaluate
    def pprint(self, indent=''):
        print indent + str(self)
    def __str__(self):
        return '<AbbreviatedStep at %x: %s>' % (id(self), repr(self))
    def __repr__(self):
        return self.parent and '..' or '.'
# From the XPath 2.0 Working Draft
# Used by XPointer
class ParsedNodeSetFunction:
    """A node-set-returning function call, filtered through optional
    predicates (XPath 2.0 Working Draft construct; used by XPointer)."""
    def __init__(self, function, predicates=None):
        self._function = function
        self._predicates = predicates
        return
    def evaluate(self, context):
        """
        Select a set of nodes from the node-set function then filter
        through the predicates.
        :raises SyntaxError: when the function does not return a list.
        """
        node_set = self._function.evaluate(context)
        if type(node_set) != type([]):
            raise SyntaxError('%s does not evaluate to a node-set' %
                              repr(self._function))
        if self._predicates and len(node_set):
            # BUGFIX: 'reverse' was previously referenced here without ever
            # being assigned, raising NameError whenever predicates were
            # present.  The function result is in document (forward) order,
            # so pass 0 for the reverse flag.
            node_set = self._predicates.filter(node_set, context, 0)
        return node_set
    select = evaluate
    def pprint(self, indent=''):
        # Single-argument print with parentheses behaves identically under
        # Python 2 and parses under Python 3.
        print(indent + str(self))
        self._function.pprint(indent + ' ')
        self._predicates and self._predicates.pprint(indent + ' ')
    def __str__(self):
        return '<Step at %x: %s>' % (id(self), repr(self))
    def __repr__(self):
        result = repr(self._function)
        if self._predicates:
            result = result + repr(self._predicates)
        return result
| mit |
coderjames/pascal | quex-0.63.1/quex/engine/generator/skipper/range.py | 1 | 16427 | import quex.engine.state_machine.index as sm_index
from quex.engine.generator.skipper.common import line_counter_in_loop, \
end_delimiter_is_subset_of_indentation_counter_newline, \
get_character_sequence, \
get_on_skip_range_open, \
line_column_counter_in_loop
from quex.engine.generator.languages.address import __nice, get_label
import quex.engine.generator.languages.variable_db as variable_db
from quex.blackboard import setup as Setup
from quex.engine.misc.string_handling import blue_print
import quex.blackboard as blackboard
from quex.blackboard import E_StateIndices
from copy import copy
def do(Data):
    """Entry point called by the code-generation framework: build a
    range-skipper fragment for the closing delimiter described in Data.

    Data keys used: "closer_sequence" (list of character codes),
    "mode_name", "indentation_counter_terminal_id".
    Returns (code string, local variable db) from get_skipper().
    """
    ClosingSequence = Data["closer_sequence"]
    ModeName = Data["mode_name"]
    assert type(ModeName) in [str, unicode]
    assert Data.has_key("indentation_counter_terminal_id")
    indentation_counter_terminal_id = Data["indentation_counter_terminal_id"]
    Mode = None
    if ModeName != "":
        # Resolve the mode object only when a mode name was given.
        Mode = blackboard.mode_db[ModeName]
    code_str, db = get_skipper(ClosingSequence, Mode, indentation_counter_terminal_id)
    return code_str, db
template_str = """
$$DELIMITER_COMMENT$$
text_end = QUEX_NAME(Buffer_text_end)(&me->buffer);
$$LC_COUNT_COLUMN_N_POINTER_DEFINITION$$
$$ENTRY$$
QUEX_BUFFER_ASSERT_CONSISTENCY(&me->buffer);
__quex_assert(QUEX_NAME(Buffer_content_size)(&me->buffer) >= Skipper$$SKIPPER_INDEX$$L );
/* NOTE: If _input_p == end of buffer, then it will drop out immediately out of the
* loop below and drop into the buffer reload procedure. */
/* Loop eating characters: Break-out as soon as the First Character of the Delimiter
* (FCD) is reached. Thus, the FCD plays also the role of the Buffer Limit Code. There
* are two reasons for break-out:
* (1) we reached a limit (end-of-file or buffer-limit)
* (2) there was really the FCD in the character stream
* This must be distinguished after the loop was exited. But, during the 'swallowing' we
* are very fast, because we do not have to check for two different characters. */
*text_end = Skipper$$SKIPPER_INDEX$$[0]; /* Overwrite BufferLimitCode (BLC). */
_$$SKIPPER_INDEX$$_LOOP:
$$INPUT_GET$$
$$IF_INPUT_EQUAL_DELIMITER_0$$
goto _$$SKIPPER_INDEX$$_LOOP_EXIT;
$$ENDIF$$
$$LC_COUNT_IN_LOOP$$
$$INPUT_P_INCREMENT$$ /* Now, BLC cannot occur. See above. */
goto _$$SKIPPER_INDEX$$_LOOP;
_$$SKIPPER_INDEX$$_LOOP_EXIT:
*text_end = QUEX_SETTING_BUFFER_LIMIT_CODE; /* Reset BLC. */
/* Case (1) and (2) from above can be distinguished easily:
*
* (1) Distance to text end == 0:
* End-of-File or Buffer-Limit.
* => goto to drop-out handling
*
* (2) Else:
* First character of delimit reached.
* => For the verification of the tail of the delimiter it is
* essential that it is loaded completely into the buffer.
* For this, it must be required:
*
* Distance to text end >= Delimiter length
*
* _input_p end
* | | end - _input_p >= 3
* [ ][R][E][M][#]
*
* The case of reload should be seldom and is costy anyway.
* Thus let's say, that in this case we simply enter the drop
* out and start the search for the delimiter all over again.
*
* (2.1) Distance to text end < Delimiter length
* => goto to drop-out handling
* (2.2) Start detection of tail of delimiter
*
*/
if( QUEX_NAME(Buffer_distance_input_to_text_end)(&me->buffer) < (ptrdiff_t)Skipper$$SKIPPER_INDEX$$L ) {
/* (2.1) Reload required. */
goto $$GOTO_RELOAD$$;
}
$$LC_ON_FIRST_DELIMITER$$
/* (2.2) Test the remaining delimiter, but note, that the check must restart at '_input_p + 1'
* if any later check fails. */
$$INPUT_P_INCREMENT$$
/* Example: Delimiter = '*', '/'; if we get ...[*][*][/]... then the the first "*" causes
* a drop out out of the 'swallowing loop' and the second "*" will mismatch
* the required "/". But, then the second "*" must be presented to the
* swallowing loop and the letter after it completes the 'match'.
* (The whole discussion, of course, is superflous if the range delimiter has length 1.) */
$$DELIMITER_REMAINDER_TEST$$
{
/* NOTE: The initial state does not increment the input_p. When it detects that
* it is located on a buffer border, it automatically triggers a reload. No
* need here to reload the buffer. */
$$LC_COUNT_END_PROCEDURE$$
/* No need for re-entry preparation. Acceptance flags and modes are untouched after skipping. */
$$GOTO_AFTER_END_OF_SKIPPING$$ /* End of range reached. */
}
$$RELOAD$$:
QUEX_BUFFER_ASSERT_CONSISTENCY_LIGHT(&me->buffer);
/* -- When loading new content it is checked that the beginning of the lexeme
* is not 'shifted' out of the buffer. In the case of skipping, we do not care about
* the lexeme at all, so do not restrict the load procedure and set the lexeme start
* to the actual input position. */
$$MARK_LEXEME_START$$
$$LC_COUNT_BEFORE_RELOAD$$
/* -- According to case (2.1) is is possible that the _input_p does not point to the end
* of the buffer, thus we record the current position in the lexeme start pointer and
* recover it after the loading. */
me->buffer._input_p = text_end;
if( QUEX_NAME(Buffer_is_end_of_file)(&me->buffer) == false ) {
QUEX_NAME(buffer_reload_forward)(&me->buffer, (QUEX_TYPE_CHARACTER_POSITION*)position, PositionRegisterN);
/* Recover '_input_p' from lexeme start
* (inverse of what we just did before the loading) */
$$INPUT_P_TO_LEXEME_START$$
/* After reload, we need to increment _input_p. That's how the game is supposed to be played.
* But, we recovered from lexeme start pointer, and this one does not need to be incremented. */
text_end = QUEX_NAME(Buffer_text_end)(&me->buffer);
$$LC_COUNT_AFTER_RELOAD$$
QUEX_BUFFER_ASSERT_CONSISTENCY(&me->buffer);
$$GOTO_ENTRY$$
}
/* Here, either the loading failed or it is not enough space to carry a closing delimiter */
$$INPUT_P_TO_LEXEME_START$$
$$ON_SKIP_RANGE_OPEN$$
"""
def get_skipper(EndSequence, Mode=None, IndentationCounterTerminalID=None, OnSkipRangeOpenStr=""):
    """Fill the range-skipper template for the delimiter 'EndSequence'
    (a non-empty list of character codes) and return the pair
    (generated code string, local variable db for the emitter)."""
    assert type(EndSequence) == list
    assert len(EndSequence) >= 1
    assert map(type, EndSequence) == [int] * len(EndSequence)
    local_variable_db = {}
    global template_str
    LanguageDB = Setup.language_db
    # Name the $$SKIPPER$$
    skipper_index = sm_index.get()
    # Determine the $$DELIMITER$$
    delimiter_str, delimiter_comment_str = get_character_sequence(EndSequence)
    delimiter_length = len(EndSequence)
    tmp = []
    LanguageDB.COMMENT(tmp, " Delimiter: %s" % delimiter_comment_str)
    delimiter_comment_str = "".join(tmp)
    # Determine the check for the tail of the delimiter
    delimiter_remainder_test_str = ""
    if len(EndSequence) != 1:
        txt = ""
        i = 0
        # Emit one compare-and-restart check per remaining delimiter char.
        for letter in EndSequence[1:]:
            i += 1
            txt += " %s\n" % LanguageDB.ASSIGN("input", LanguageDB.INPUT_P_DEREFERENCE(i-1))
            txt += " %s" % LanguageDB.IF_INPUT("!=", "Skipper$$SKIPPER_INDEX$$[%i]" % i)
            txt += " %s" % LanguageDB.GOTO(skipper_index)
            txt += " %s" % LanguageDB.END_IF()
        delimiter_remainder_test_str = txt
    if not end_delimiter_is_subset_of_indentation_counter_newline(Mode, EndSequence):
        goto_after_end_of_skipping_str = LanguageDB.GOTO(E_StateIndices.ANALYZER_REENTRY)
    else:
        # If there is indentation counting involved, then the counter's terminal id must
        # be determined at this place.
        assert IndentationCounterTerminalID is not None
        # If the ending delimiter is a subset of what the 'newline' pattern triggers
        # in indentation counting => move on to the indentation counter.
        goto_after_end_of_skipping_str = LanguageDB.GOTO_TERMINAL(IndentationCounterTerminalID)
    if OnSkipRangeOpenStr != "": on_skip_range_open_str = OnSkipRangeOpenStr
    else: on_skip_range_open_str = get_on_skip_range_open(Mode, EndSequence)
    # The main part: substitute all structural placeholders of the template.
    code_str = blue_print(template_str,
                          [
                           ["$$DELIMITER_COMMENT$$", delimiter_comment_str],
                           ["$$INPUT_P_INCREMENT$$", LanguageDB.INPUT_P_INCREMENT()],
                           ["$$INPUT_P_DECREMENT$$", LanguageDB.INPUT_P_DECREMENT()],
                           ["$$INPUT_GET$$", LanguageDB.ACCESS_INPUT()],
                           ["$$IF_INPUT_EQUAL_DELIMITER_0$$", LanguageDB.IF_INPUT("==", "Skipper$$SKIPPER_INDEX$$[0]")],
                           ["$$ENDIF$$", LanguageDB.END_IF()],
                           ["$$ENTRY$$", LanguageDB.LABEL(skipper_index)],
                           ["$$RELOAD$$", get_label("$reload", skipper_index)],
                           ["$$GOTO_ENTRY$$", LanguageDB.GOTO(skipper_index)],
                           ["$$INPUT_P_TO_LEXEME_START$$", LanguageDB.INPUT_P_TO_LEXEME_START()],
                           # When things were skipped, no change to acceptance flags or modes has
                           # happend. One can jump immediately to the start without re-entry preparation.
                           ["$$GOTO_AFTER_END_OF_SKIPPING$$", goto_after_end_of_skipping_str],
                           ["$$MARK_LEXEME_START$$", LanguageDB.LEXEME_START_SET()],
                           ["$$DELIMITER_REMAINDER_TEST$$", delimiter_remainder_test_str],
                           ["$$ON_SKIP_RANGE_OPEN$$", on_skip_range_open_str],
                          ])
    # Line and column number counting
    code_str, reference_p_f = __lc_counting_replacements(code_str, EndSequence)
    # The finishing touch: skipper index and reload label substitutions.
    code_str = blue_print(code_str,
                          [["$$SKIPPER_INDEX$$", __nice(skipper_index)],
                           ["$$GOTO_RELOAD$$", get_label("$reload", skipper_index)]])
    if reference_p_f:
        variable_db.enter(local_variable_db, "reference_p", Condition="QUEX_OPTION_COLUMN_NUMBER_COUNTING")
    variable_db.enter(local_variable_db, "Skipper%i", "{ %s }" % delimiter_str, delimiter_length, Index=skipper_index)
    variable_db.enter(local_variable_db, "Skipper%iL", "%i" % delimiter_length, Index=skipper_index)
    variable_db.enter(local_variable_db, "text_end")
    return code_str, local_variable_db
def __lc_counting_replacements(code_str, EndSequence):
    """Line and Column Number Counting(Range Skipper):
    -- in loop if there appears a newline, then do:
    increment line_n
    set position from where to count column_n
    -- at end of skipping do one of the following:
    if end delimiter contains newline:
    column_n = number of letters since last new line in end delimiter
    increment line_n by number of newlines in end delimiter.
    (NOTE: in this case the setting of the position from where to count
    the column_n can be omitted.)
    else:
    column_n = current_position - position from where to count column number.
    NOTE: On reload we do count the column numbers and reset the column_p.

    Returns (substituted code string, flag whether a reference pointer
    variable is required by the generated code).
    """
    LanguageDB = Setup.language_db
    def get_character_n_after_last_newline(Sequence):
        # Characters between the last newline and the end of the delimiter;
        # -1 when the delimiter contains no newline at all.
        tmp = copy(Sequence)
        tmp.reverse()
        # NOTE(review): bare 'except' -- only ValueError from list.index is
        # expected here; consider narrowing the clause.
        try: return tmp.index(ord('\n'))
        except: return -1
    char_n_after_last_newline = get_character_n_after_last_newline(EndSequence)
    reference_p_def = ""
    in_loop = ""
    end_procedure = ""
    before_reload = ""
    after_reload = ""
    on_first_delimiter = ""
    reference_p_required_f = False
    # Line/Column Counting:
    newline_number_in_delimiter = EndSequence.count(ord('\n'))
    if EndSequence == map(ord, "\n") or EndSequence == map(ord, "\r\n"):
        # (1) If the end-delimiter is a newline
        # => there cannot appear a newline inside the comment
        # => IN LOOP: no line number increment
        # no reference pointer required for column counting
        end_procedure += " __QUEX_IF_COUNT_COLUMNS_SET((size_t)1);\n"
        end_procedure += " __QUEX_IF_COUNT_LINES_ADD((size_t)1);\n"
    else:
        # (2) If end-delimiter is NOT newline
        # => there can appear a newline inside the comment
        if newline_number_in_delimiter == 0:
            # -- no newlines in delimiter => line and column number
            # must be counted.
            in_loop = line_column_counter_in_loop()
            end_procedure = " __QUEX_IF_COUNT_COLUMNS_ADD((size_t)(QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer)\n" + \
                            " - reference_p));\n"
            reference_p_required_f = True
        else:
            # -- newline inside delimiter => line number must be counted
            # column number is fixed.
            end_procedure = " __QUEX_IF_COUNT_COLUMNS_SET((size_t)%i);\n" \
                            % (char_n_after_last_newline + 1)
            if EndSequence[0] == ord('\n') \
               or len(EndSequence) > 1 and EndSequence[0:2] == [ord('\r'), ord('\n')]:
                # If the first character in the sequence is newline, then the line counting
                # may is prevented by the loop exit. Now, we need to count.
                on_first_delimiter = "/* First delimiter char was a newline */\n" + \
                                     " __QUEX_IF_COUNT_LINES_ADD((size_t)1);\n"
                end_procedure += " __QUEX_IF_COUNT_LINES_ADD((size_t)%i);\n" % (newline_number_in_delimiter - 1)
            else:
                in_loop = line_counter_in_loop()
                end_procedure += " __QUEX_IF_COUNT_LINES_ADD((size_t)%i);\n" % newline_number_in_delimiter
    if reference_p_required_f:
        # Column counting via reference pointer: define it, flush the count
        # before a reload invalidates pointers, and re-anchor afterwards.
        reference_p_def = " __QUEX_IF_COUNT_COLUMNS(reference_p = QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer));\n"
        before_reload = " __QUEX_IF_COUNT_COLUMNS_ADD((size_t)(QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer)\n" + \
                        " - reference_p));\n"
        after_reload = " __QUEX_IF_COUNT_COLUMNS(reference_p = QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer));\n"
    if len(EndSequence) > 1:
        end_procedure = "%s\n%s" % (LanguageDB.INPUT_P_ADD(len(EndSequence)-1), end_procedure)
    return blue_print(code_str,
                      [["$$LC_COUNT_COLUMN_N_POINTER_DEFINITION$$", reference_p_def],
                       ["$$LC_COUNT_IN_LOOP$$", in_loop],
                       ["$$LC_COUNT_END_PROCEDURE$$", end_procedure],
                       ["$$LC_COUNT_BEFORE_RELOAD$$", before_reload],
                       ["$$LC_COUNT_AFTER_RELOAD$$", after_reload],
                       ["$$LC_ON_FIRST_DELIMITER$$", on_first_delimiter],
                      ]), \
           reference_p_required_f
| bsd-2-clause |
maestrano/openerp | openerp/addons/base_report_designer/plugin/openerp_report_designer/bin/script/lib/tiny_socket.py | 386 | 3270 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import socket
import cPickle
import cStringIO
import marshal
class Myexception(Exception):
    """Exception carrying an XML-RPC style fault code and fault string."""
    def __init__(self, faultCode, faultString):
        # Expose both pieces as attributes and mirror them into self.args
        # so str()/repr() of the exception shows the complete fault.
        self.faultCode, self.faultString = faultCode, faultString
        self.args = (faultCode, faultString)
class mysocket:
    """Blocking TCP helper exchanging cPickle-serialized messages framed as:
    8-byte ASCII decimal length, one-byte exception flag ('1'/'0'), payload.
    """
    def __init__(self, sock=None):
        if sock is None:
            self.sock = socket.socket(
                socket.AF_INET, socket.SOCK_STREAM)
        else:
            self.sock = sock
        # Avoid hanging forever on a dead peer.
        self.sock.settimeout(120)
    def connect(self, host, port=False):
        # Accepts either (host, port) or a single 'proto://host:port' URL.
        if not port:
            protocol, buf = host.split('//')
            host, port = buf.split(':')
        self.sock.connect((host, int(port)))
    def disconnect(self):
        self.sock.shutdown(socket.SHUT_RDWR)
        self.sock.close()
    def mysend(self, msg, exception=False, traceback=None):
        # Frame and transmit [msg, traceback]; exception=True marks the
        # payload as an error for the receiving side.
        msg = cPickle.dumps([msg,traceback])
        size = len(msg)
        # NOTE(review): plain send() may transmit fewer bytes than given for
        # the header and flag; sendall() would be safer -- confirm before
        # relying on this across slow or congested links.
        self.sock.send('%8d' % size)
        self.sock.send(exception and "1" or "0")
        totalsent = 0
        while totalsent < size:
            sent = self.sock.send(msg[totalsent:])
            if sent == 0:
                raise RuntimeError, "Socket connection broken."
            totalsent = totalsent + sent
    def myreceive(self):
        # Read the fixed 8-byte length header in full before parsing it.
        buf=''
        while len(buf) < 8:
            chunk = self.sock.recv(8 - len(buf))
            if chunk == '':
                raise RuntimeError, "Socket connection broken."
            buf += chunk
        size = int(buf)
        buf = self.sock.recv(1)
        if buf != "0":
            exception = buf
        else:
            exception = False
        msg = ''
        while len(msg) < size:
            chunk = self.sock.recv(size-len(msg))
            if chunk == '':
                raise RuntimeError, "Socket connection broken."
            msg = msg + chunk
        msgio = cStringIO.StringIO(msg)
        unpickler = cPickle.Unpickler(msgio)
        # Disallow resolving global names during unpickling so a malicious
        # peer cannot instantiate arbitrary classes via the pickle stream.
        unpickler.find_global = None
        res = unpickler.load()
        if isinstance(res[0],Exception):
            # Re-raise transported exceptions on this side of the wire.
            if exception:
                raise Myexception(str(res[0]), str(res[1]))
            raise res[0]
        else:
            return res[0]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
beck/django | tests/model_forms/tests.py | 85 | 109361 | from __future__ import unicode_literals
import datetime
import os
from decimal import Decimal
from unittest import skipUnless
from django import forms
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldError, ImproperlyConfigured,
)
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.validators import ValidationError
from django.db import connection, models
from django.db.models.query import EmptyQuerySet
from django.forms.models import (
ModelFormMetaclass, construct_instance, fields_for_model, model_to_dict,
modelform_factory,
)
from django.template import Context, Template
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.utils import six
from django.utils._os import upath
from .models import (
Article, ArticleStatus, Author, Author1, BetterWriter, BigInt, Book,
Category, Character, Colour, ColourfulItem, CommaSeparatedInteger,
CustomErrorMessage, CustomFF, CustomFieldForExclusionModel, DateTimePost,
DerivedBook, DerivedPost, Document, ExplicitPK, FilePathModel,
FlexibleDatePost, Homepage, ImprovedArticle, ImprovedArticleWithParentLink,
Inventory, Person, Photo, Post, Price, Product, Publication,
PublicationDefaults, Student, StumpJoke, TextFile, Triple, Writer,
WriterProfile, test_images,
)
# Image-backed form fixtures are only defined when image support
# (test_images) is available in the test models.
if test_images:
    from .models import ImageFile, OptionalImageFile
    class ImageFileForm(forms.ModelForm):
        class Meta:
            model = ImageFile
            fields = '__all__'
    class OptionalImageFileForm(forms.ModelForm):
        class Meta:
            model = OptionalImageFile
            fields = '__all__'
# ---------------------------------------------------------------------------
# Fixture form classes: trivial ModelForm declarations binding models from
# .models, used by the test cases in this module.
# ---------------------------------------------------------------------------
class ProductForm(forms.ModelForm):
    class Meta:
        model = Product
        fields = '__all__'
class PriceForm(forms.ModelForm):
    class Meta:
        model = Price
        fields = '__all__'
class BookForm(forms.ModelForm):
    class Meta:
        model = Book
        fields = '__all__'
class DerivedBookForm(forms.ModelForm):
    class Meta:
        model = DerivedBook
        fields = '__all__'
class ExplicitPKForm(forms.ModelForm):
    class Meta:
        model = ExplicitPK
        fields = ('key', 'desc',)
class PostForm(forms.ModelForm):
    class Meta:
        model = Post
        fields = '__all__'
class DerivedPostForm(forms.ModelForm):
    class Meta:
        model = DerivedPost
        fields = '__all__'
class CustomWriterForm(forms.ModelForm):
    # Overrides the model field: 'name' becomes optional on the form.
    name = forms.CharField(required=False)
    class Meta:
        model = Writer
        fields = '__all__'
class BaseCategoryForm(forms.ModelForm):
    class Meta:
        model = Category
        fields = '__all__'
class ArticleForm(forms.ModelForm):
    class Meta:
        model = Article
        fields = '__all__'
class RoykoForm(forms.ModelForm):
    class Meta:
        model = Writer
        fields = '__all__'
class ArticleStatusForm(forms.ModelForm):
    class Meta:
        model = ArticleStatus
        fields = '__all__'
class InventoryForm(forms.ModelForm):
    class Meta:
        model = Inventory
        fields = '__all__'
class SelectInventoryForm(forms.Form):
    # Plain Form (not ModelForm): selects Inventory rows by barcode.
    items = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
class CustomFieldForExclusionForm(forms.ModelForm):
    class Meta:
        model = CustomFieldForExclusionModel
        fields = ['name', 'markup']
class TextFileForm(forms.ModelForm):
    class Meta:
        model = TextFile
        fields = '__all__'
class BigIntForm(forms.ModelForm):
    class Meta:
        model = BigInt
        fields = '__all__'
class ModelFormWithMedia(forms.ModelForm):
    # Declares form media assets in addition to the model binding.
    class Media:
        js = ('/some/form/javascript',)
        css = {
            'all': ('/some/form/css',)
        }
    class Meta:
        model = TextFile
        fields = '__all__'
class CustomErrorMessageForm(forms.ModelForm):
    # Field-level override supplying a custom 'invalid' error message.
    name1 = forms.CharField(error_messages={'invalid': 'Form custom error message.'})
    class Meta:
        fields = '__all__'
        model = CustomErrorMessage
class ModelFormBaseTest(TestCase):
def test_base_form(self):
self.assertEqual(list(BaseCategoryForm.base_fields),
['name', 'slug', 'url'])
def test_no_model_class(self):
class NoModelModelForm(forms.ModelForm):
pass
self.assertRaises(ValueError, NoModelModelForm)
def test_empty_fields_to_fields_for_model(self):
"""
An argument of fields=() to fields_for_model should return an empty dictionary
"""
field_dict = fields_for_model(Person, fields=())
self.assertEqual(len(field_dict), 0)
def test_empty_fields_on_modelform(self):
"""
No fields on a ModelForm should actually result in no fields.
"""
class EmptyPersonForm(forms.ModelForm):
class Meta:
model = Person
fields = ()
form = EmptyPersonForm()
self.assertEqual(len(form.fields), 0)
def test_empty_fields_to_construct_instance(self):
"""
No fields should be set on a model instance if construct_instance receives fields=().
"""
form = modelform_factory(Person, fields="__all__")({'name': 'John Doe'})
self.assertTrue(form.is_valid())
instance = construct_instance(form, Person(), fields=())
self.assertEqual(instance.name, '')
def test_blank_with_null_foreign_key_field(self):
"""
#13776 -- ModelForm's with models having a FK set to null=False and
required=False should be valid.
"""
class FormForTestingIsValid(forms.ModelForm):
class Meta:
model = Student
fields = '__all__'
def __init__(self, *args, **kwargs):
super(FormForTestingIsValid, self).__init__(*args, **kwargs)
self.fields['character'].required = False
char = Character.objects.create(username='user',
last_action=datetime.datetime.today())
data = {'study': 'Engineering'}
data2 = {'study': 'Engineering', 'character': char.pk}
# form is valid because required=False for field 'character'
f1 = FormForTestingIsValid(data)
self.assertTrue(f1.is_valid())
f2 = FormForTestingIsValid(data2)
self.assertTrue(f2.is_valid())
obj = f2.save()
self.assertEqual(obj.character, char)
def test_missing_fields_attribute(self):
message = (
"Creating a ModelForm without either the 'fields' attribute "
"or the 'exclude' attribute is prohibited; form "
"MissingFieldsForm needs updating."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
class MissingFieldsForm(forms.ModelForm):
class Meta:
model = Category
def test_extra_fields(self):
class ExtraFields(BaseCategoryForm):
some_extra_field = forms.BooleanField()
self.assertEqual(list(ExtraFields.base_fields),
['name', 'slug', 'url', 'some_extra_field'])
def test_extra_field_model_form(self):
try:
class ExtraPersonForm(forms.ModelForm):
""" ModelForm with an extra field """
age = forms.IntegerField()
class Meta:
model = Person
fields = ('name', 'no-field')
except FieldError as e:
# Make sure the exception contains some reference to the
# field responsible for the problem.
self.assertIn('no-field', e.args[0])
else:
self.fail('Invalid "no-field" field not caught')
def test_extra_declared_field_model_form(self):
try:
class ExtraPersonForm(forms.ModelForm):
""" ModelForm with an extra field """
age = forms.IntegerField()
class Meta:
model = Person
fields = ('name', 'age')
except FieldError:
self.fail('Declarative field raised FieldError incorrectly')
def test_extra_field_modelform_factory(self):
self.assertRaises(FieldError, modelform_factory,
Person, fields=['no-field', 'name'])
def test_replace_field(self):
class ReplaceField(forms.ModelForm):
url = forms.BooleanField()
class Meta:
model = Category
fields = '__all__'
self.assertIsInstance(ReplaceField.base_fields['url'],
forms.fields.BooleanField)
def test_replace_field_variant_2(self):
# Should have the same result as before,
# but 'fields' attribute specified differently
class ReplaceField(forms.ModelForm):
url = forms.BooleanField()
class Meta:
model = Category
fields = ['url']
self.assertIsInstance(ReplaceField.base_fields['url'],
forms.fields.BooleanField)
def test_replace_field_variant_3(self):
# Should have the same result as before,
# but 'fields' attribute specified differently
class ReplaceField(forms.ModelForm):
url = forms.BooleanField()
class Meta:
model = Category
fields = [] # url will still appear, since it is explicit above
self.assertIsInstance(ReplaceField.base_fields['url'],
forms.fields.BooleanField)
def test_override_field(self):
class WriterForm(forms.ModelForm):
book = forms.CharField(required=False)
class Meta:
model = Writer
fields = '__all__'
wf = WriterForm({'name': 'Richard Lockridge'})
self.assertTrue(wf.is_valid())
def test_limit_nonexistent_field(self):
expected_msg = 'Unknown field(s) (nonexistent) specified for Category'
with self.assertRaisesMessage(FieldError, expected_msg):
class InvalidCategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = ['nonexistent']
def test_limit_fields_with_string(self):
expected_msg = "CategoryForm.Meta.fields cannot be a string. Did you mean to type: ('url',)?"
with self.assertRaisesMessage(TypeError, expected_msg):
class CategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = ('url') # note the missing comma
def test_exclude_fields(self):
class ExcludeFields(forms.ModelForm):
class Meta:
model = Category
exclude = ['url']
self.assertEqual(list(ExcludeFields.base_fields),
['name', 'slug'])
def test_exclude_nonexistent_field(self):
class ExcludeFields(forms.ModelForm):
class Meta:
model = Category
exclude = ['nonexistent']
self.assertEqual(list(ExcludeFields.base_fields),
['name', 'slug', 'url'])
def test_exclude_fields_with_string(self):
expected_msg = "CategoryForm.Meta.exclude cannot be a string. Did you mean to type: ('url',)?"
with self.assertRaisesMessage(TypeError, expected_msg):
class CategoryForm(forms.ModelForm):
class Meta:
model = Category
exclude = ('url') # note the missing comma
    def test_exclude_and_validation(self):
        """Model fields omitted from the form are not validated by the form."""
        # This Price instance generated by this form is not valid because the quantity
        # field is required, but the form is valid because the field is excluded from
        # the form. This is for backwards compatibility.
        class PriceFormWithoutQuantity(forms.ModelForm):
            class Meta:
                model = Price
                exclude = ('quantity',)
        form = PriceFormWithoutQuantity({'price': '6.00'})
        self.assertTrue(form.is_valid())
        price = form.save(commit=False)
        # The unsaved instance still fails full model validation, because
        # 'quantity' was never provided.
        with self.assertRaises(ValidationError):
            price.full_clean()
        # The form should not validate fields that it doesn't contain even if they are
        # specified using 'fields', not 'exclude'.
        class PriceFormWithoutQuantity(forms.ModelForm):
            class Meta:
                model = Price
                fields = ('price',)
        form = PriceFormWithoutQuantity({'price': '6.00'})
        self.assertTrue(form.is_valid())
        # The form should still have an instance of a model that is not complete and
        # not saved into a DB yet.
        self.assertEqual(form.instance.price, Decimal('6.00'))
        self.assertIsNone(form.instance.quantity)
        self.assertIsNone(form.instance.pk)
def test_confused_form(self):
class ConfusedForm(forms.ModelForm):
""" Using 'fields' *and* 'exclude'. Not sure why you'd want to do
this, but uh, "be liberal in what you accept" and all.
"""
class Meta:
model = Category
fields = ['name', 'url']
exclude = ['url']
self.assertEqual(list(ConfusedForm.base_fields),
['name'])
def test_mixmodel_form(self):
class MixModelForm(BaseCategoryForm):
""" Don't allow more than one 'model' definition in the
inheritance hierarchy. Technically, it would generate a valid
form, but the fact that the resulting save method won't deal with
multiple objects is likely to trip up people not familiar with the
mechanics.
"""
class Meta:
model = Article
fields = '__all__'
# MixModelForm is now an Article-related thing, because MixModelForm.Meta
# overrides BaseCategoryForm.Meta.
self.assertEqual(
list(MixModelForm.base_fields),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_article_form(self):
self.assertEqual(
list(ArticleForm.base_fields),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_bad_form(self):
# First class with a Meta class wins...
class BadForm(ArticleForm, BaseCategoryForm):
pass
self.assertEqual(
list(BadForm.base_fields),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_invalid_meta_model(self):
class InvalidModelForm(forms.ModelForm):
class Meta:
pass # no model
# Can't create new form
with self.assertRaises(ValueError):
InvalidModelForm()
# Even if you provide a model instance
with self.assertRaises(ValueError):
InvalidModelForm(instance=Category)
def test_subcategory_form(self):
class SubCategoryForm(BaseCategoryForm):
""" Subclassing without specifying a Meta on the class will use
the parent's Meta (or the first parent in the MRO if there are
multiple parent classes).
"""
pass
self.assertEqual(list(SubCategoryForm.base_fields),
['name', 'slug', 'url'])
    def test_subclassmeta_form(self):
        """Subclassing the inner Meta adjusts the field list of the subclass."""
        class SomeCategoryForm(forms.ModelForm):
            checkbox = forms.BooleanField()
            class Meta:
                model = Category
                fields = '__all__'
        class SubclassMeta(SomeCategoryForm):
            """ We can also subclass the Meta inner class to change the fields
            list.
            """
            class Meta(SomeCategoryForm.Meta):
                exclude = ['url']
        # 'url' is excluded, but the declared 'checkbox' field remains.
        self.assertHTMLEqual(
            str(SubclassMeta()),
            """<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>
<tr><th><label for="id_slug">Slug:</label></th><td><input id="id_slug" type="text" name="slug" maxlength="20" /></td></tr>
<tr><th><label for="id_checkbox">Checkbox:</label></th><td><input type="checkbox" name="checkbox" id="id_checkbox" /></td></tr>"""
        )
    def test_orderfields_form(self):
        """Meta.fields also controls the ordering of the rendered fields."""
        class OrderFields(forms.ModelForm):
            class Meta:
                model = Category
                fields = ['url', 'name']
        self.assertEqual(list(OrderFields.base_fields),
                         ['url', 'name'])
        # 'url' renders before 'name', matching the Meta.fields order.
        self.assertHTMLEqual(
            str(OrderFields()),
            """<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>
<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>"""
        )
def test_orderfields2_form(self):
class OrderFields2(forms.ModelForm):
class Meta:
model = Category
fields = ['slug', 'url', 'name']
exclude = ['url']
self.assertEqual(list(OrderFields2.base_fields),
['slug', 'name'])
class FieldOverridesByFormMetaForm(forms.ModelForm):
    """Exercises every per-field override hook available on ModelForm.Meta:
    widgets, labels, help_texts, error_messages and field_classes.
    Used by TestFieldOverridesByFormMeta below.
    """
    class Meta:
        model = Category
        fields = ['name', 'url', 'slug']
        # Replace default widgets: 'name' becomes a Textarea, 'url' keeps a
        # TextInput but gains a CSS class.
        widgets = {
            'name': forms.Textarea,
            'url': forms.TextInput(attrs={'class': 'url'})
        }
        labels = {
            'name': 'Title',
        }
        help_texts = {
            'slug': 'Watch out! Letters, numbers, underscores and hyphens only.',
        }
        error_messages = {
            'slug': {
                'invalid': (
                    "Didn't you read the help text? "
                    "We said letters, numbers, underscores and hyphens only!"
                )
            }
        }
        # Swap the form field class itself (CharField -> URLField).
        field_classes = {
            'url': forms.URLField,
        }
class TestFieldOverridesByFormMeta(SimpleTestCase):
    """Checks that each Meta override declared on FieldOverridesByFormMetaForm
    actually takes effect on the rendered form."""
    def test_widget_overrides(self):
        form = FieldOverridesByFormMetaForm()
        # 'name' swapped to a Textarea via Meta.widgets.
        self.assertHTMLEqual(
            str(form['name']),
            '<textarea id="id_name" rows="10" cols="40" name="name" maxlength="20"></textarea>',
        )
        # 'url' keeps a TextInput but gains the custom class attribute.
        self.assertHTMLEqual(
            str(form['url']),
            '<input id="id_url" type="text" class="url" name="url" maxlength="40" />',
        )
        # 'slug' is untouched.
        self.assertHTMLEqual(
            str(form['slug']),
            '<input id="id_slug" type="text" name="slug" maxlength="20" />',
        )
    def test_label_overrides(self):
        form = FieldOverridesByFormMetaForm()
        # 'name' label replaced by Meta.labels; the others keep their
        # model-derived labels.
        self.assertHTMLEqual(
            str(form['name'].label_tag()),
            '<label for="id_name">Title:</label>',
        )
        self.assertHTMLEqual(
            str(form['url'].label_tag()),
            '<label for="id_url">The URL:</label>',
        )
        self.assertHTMLEqual(
            str(form['slug'].label_tag()),
            '<label for="id_slug">Slug:</label>',
        )
    def test_help_text_overrides(self):
        form = FieldOverridesByFormMetaForm()
        self.assertEqual(
            form['slug'].help_text,
            'Watch out! Letters, numbers, underscores and hyphens only.',
        )
    def test_error_messages_overrides(self):
        # An invalid slug must surface the custom 'invalid' message.
        form = FieldOverridesByFormMetaForm(data={
            'name': 'Category',
            'url': 'http://www.example.com/category/',
            'slug': '!%#*@',
        })
        form.full_clean()
        error = [
            "Didn't you read the help text? "
            "We said letters, numbers, underscores and hyphens only!",
        ]
        self.assertEqual(form.errors, {'slug': error})
    def test_field_type_overrides(self):
        form = FieldOverridesByFormMetaForm()
        # The model field stays a CharField; only the form field class changes.
        self.assertIs(Category._meta.get_field('url').__class__, models.CharField)
        self.assertIsInstance(form.fields['url'], forms.URLField)
class IncompleteCategoryFormWithFields(forms.ModelForm):
    """
    A form that replaces the model's url field with a custom one. This should
    prevent the model field's validation from being called.
    """
    url = forms.CharField(required=False)

    class Meta:
        model = Category
        fields = ('name', 'slug')
class IncompleteCategoryFormWithExclude(forms.ModelForm):
    """
    A form that replaces the model's url field with a custom one. This should
    prevent the model field's validation from being called.
    """
    url = forms.CharField(required=False)

    class Meta:
        model = Category
        exclude = ['url']
class ValidationTest(SimpleTestCase):
    """Validation behavior when a model field is replaced or excluded.

    Note: the original tests used bare ``assert`` statements, which are
    stripped when Python runs with ``-O`` and give poor failure output;
    they are replaced with ``self.assertTrue`` for consistency with the
    rest of this file.
    """
    def test_validates_with_replaced_field_not_specified(self):
        # The custom 'url' field (required=False) replaces the model field,
        # so the model field's validation must not run.
        form = IncompleteCategoryFormWithFields(data={'name': 'some name', 'slug': 'some-slug'})
        self.assertTrue(form.is_valid())

    def test_validates_with_replaced_field_excluded(self):
        form = IncompleteCategoryFormWithExclude(data={'name': 'some name', 'slug': 'some-slug'})
        self.assertTrue(form.is_valid())

    def test_notrequired_overrides_notblank(self):
        # A form-level required=False wins over the model's blank=False.
        form = CustomWriterForm({})
        self.assertTrue(form.is_valid())
class UniqueTest(TestCase):
    """
    unique/unique_together validation.
    """
    def setUp(self):
        # A writer shared by the uniqueness tests below.
        self.writer = Writer.objects.create(name='Mike Royko')
    def test_simple_unique(self):
        form = ProductForm({'slug': 'teddy-bear-blue'})
        self.assertTrue(form.is_valid())
        obj = form.save()
        # A second form with the same slug must fail the unique check...
        form = ProductForm({'slug': 'teddy-bear-blue'})
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['slug'], ['Product with this Slug already exists.'])
        # ...unless it is editing the existing instance itself.
        form = ProductForm({'slug': 'teddy-bear-blue'}, instance=obj)
        self.assertTrue(form.is_valid())
    def test_unique_together(self):
        """ModelForm test of unique_together constraint"""
        form = PriceForm({'price': '6.00', 'quantity': '1'})
        self.assertTrue(form.is_valid())
        form.save()
        # Same (price, quantity) pair violates unique_together; the error is
        # reported as a non-field ('__all__') error.
        form = PriceForm({'price': '6.00', 'quantity': '1'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['__all__'], ['Price with this Price and Quantity already exists.'])
    def test_multiple_field_unique_together(self):
        """
        When the same field is involved in multiple unique_together
        constraints, we need to make sure we don't remove the data for it
        before doing all the validation checking (not just failing after
        the first one).
        """
        class TripleForm(forms.ModelForm):
            class Meta:
                model = Triple
                fields = '__all__'
        Triple.objects.create(left=1, middle=2, right=3)
        form = TripleForm({'left': '1', 'middle': '2', 'right': '3'})
        self.assertFalse(form.is_valid())
        form = TripleForm({'left': '1', 'middle': '3', 'right': '1'})
        self.assertTrue(form.is_valid())
    @skipUnlessDBFeature('supports_nullable_unique_constraints')
    def test_unique_null(self):
        # NULL values are exempt from unique constraints on backends that
        # support nullable unique columns.
        title = 'I May Be Wrong But I Doubt It'
        form = BookForm({'title': title, 'author': self.writer.pk})
        self.assertTrue(form.is_valid())
        form.save()
        form = BookForm({'title': title, 'author': self.writer.pk})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.'])
        # Omitting the author (NULL) allows duplicate titles.
        form = BookForm({'title': title})
        self.assertTrue(form.is_valid())
        form.save()
        form = BookForm({'title': title})
        self.assertTrue(form.is_valid())
    def test_inherited_unique(self):
        # A unique field on the concrete parent is enforced on the child form.
        title = 'Boss'
        Book.objects.create(title=title, author=self.writer, special_id=1)
        form = DerivedBookForm({'title': 'Other', 'author': self.writer.pk, 'special_id': '1', 'isbn': '12345'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['special_id'], ['Book with this Special id already exists.'])
    def test_inherited_unique_together(self):
        # unique_together declared on the concrete parent is enforced too.
        title = 'Boss'
        form = BookForm({'title': title, 'author': self.writer.pk})
        self.assertTrue(form.is_valid())
        form.save()
        form = DerivedBookForm({'title': title, 'author': self.writer.pk, 'isbn': '12345'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.'])
    def test_abstract_inherited_unique(self):
        # A unique field inherited from an abstract base is enforced.
        title = 'Boss'
        isbn = '12345'
        DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
        form = DerivedBookForm({'title': 'Other', 'author': self.writer.pk, 'isbn': isbn})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['isbn'], ['Derived book with this Isbn already exists.'])
    def test_abstract_inherited_unique_together(self):
        # unique_together inherited from an abstract base is enforced.
        title = 'Boss'
        isbn = '12345'
        DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
        form = DerivedBookForm({
            'title': 'Other',
            'author': self.writer.pk,
            'isbn': '9876',
            'suffix1': '0',
            'suffix2': '0'
        })
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['__all__'],
                         ['Derived book with this Suffix1 and Suffix2 already exists.'])
    def test_explicitpk_unspecified(self):
        """Test for primary_key being in the form and failing validation."""
        form = ExplicitPKForm({'key': '', 'desc': ''})
        self.assertFalse(form.is_valid())
    def test_explicitpk_unique(self):
        """Ensure keys and blank character strings are tested for uniqueness."""
        form = ExplicitPKForm({'key': 'key1', 'desc': ''})
        self.assertTrue(form.is_valid())
        form.save()
        form = ExplicitPKForm({'key': 'key1', 'desc': ''})
        self.assertFalse(form.is_valid())
        # Three errors: the unique_together pair plus each unique field.
        self.assertEqual(len(form.errors), 3)
        self.assertEqual(form.errors['__all__'], ['Explicit pk with this Key and Desc already exists.'])
        self.assertEqual(form.errors['desc'], ['Explicit pk with this Desc already exists.'])
        self.assertEqual(form.errors['key'], ['Explicit pk with this Key already exists.'])
    def test_unique_for_date(self):
        # Post declares unique_for_date ('title'), unique_for_year ('slug')
        # and unique_for_month ('subtitle'), all against 'posted'.
        p = Post.objects.create(title="Django 1.0 is released",
            slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
        form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.'])
        form = PostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
        self.assertTrue(form.is_valid())
        form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
        self.assertTrue(form.is_valid())
        form = PostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.'])
        form = PostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.'])
        # Editing the existing instance with its own values is allowed.
        form = PostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
            "slug": "Django 1.0", 'posted': '2008-09-03'}, instance=p)
        self.assertTrue(form.is_valid())
        form = PostForm({'title': "Django 1.0 is released"})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['posted'], ['This field is required.'])
    def test_unique_for_date_in_exclude(self):
        """
        If the date for unique_for_* constraints is excluded from the
        ModelForm (in this case 'posted' has editable=False), then the
        constraint should be ignored.
        """
        class DateTimePostForm(forms.ModelForm):
            class Meta:
                model = DateTimePost
                fields = '__all__'
        DateTimePost.objects.create(title="Django 1.0 is released",
            slug="Django 1.0", subtitle="Finally",
            posted=datetime.datetime(2008, 9, 3, 10, 10, 1))
        # 'title' has unique_for_date='posted'
        form = DateTimePostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
        self.assertTrue(form.is_valid())
        # 'slug' has unique_for_year='posted'
        form = DateTimePostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
        self.assertTrue(form.is_valid())
        # 'subtitle' has unique_for_month='posted'
        form = DateTimePostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
        self.assertTrue(form.is_valid())
    def test_inherited_unique_for_date(self):
        # unique_for_* constraints inherited by DerivedPost behave exactly
        # like on the base Post model (see test_unique_for_date).
        p = Post.objects.create(title="Django 1.0 is released",
            slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
        form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.'])
        form = DerivedPostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
        self.assertTrue(form.is_valid())
        form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
        self.assertTrue(form.is_valid())
        form = DerivedPostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.'])
        form = DerivedPostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.'])
        form = DerivedPostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
            "slug": "Django 1.0", 'posted': '2008-09-03'}, instance=p)
        self.assertTrue(form.is_valid())
    def test_unique_for_date_with_nullable_date(self):
        # When the reference date is NULL, unique_for_* checks are skipped.
        class FlexDatePostForm(forms.ModelForm):
            class Meta:
                model = FlexibleDatePost
                fields = '__all__'
        p = FlexibleDatePost.objects.create(title="Django 1.0 is released",
            slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
        form = FlexDatePostForm({'title': "Django 1.0 is released"})
        self.assertTrue(form.is_valid())
        form = FlexDatePostForm({'slug': "Django 1.0"})
        self.assertTrue(form.is_valid())
        form = FlexDatePostForm({'subtitle': "Finally"})
        self.assertTrue(form.is_valid())
        form = FlexDatePostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
            "slug": "Django 1.0"}, instance=p)
        self.assertTrue(form.is_valid())
    def test_override_unique_message(self):
        # Meta.error_messages can customize the 'unique' message, with
        # %(model_name)s and %(field_label)s placeholders.
        class CustomProductForm(ProductForm):
            class Meta(ProductForm.Meta):
                error_messages = {
                    'slug': {
                        'unique': "%(model_name)s's %(field_label)s not unique.",
                    }
                }
        Product.objects.create(slug='teddy-bear-blue')
        form = CustomProductForm({'slug': 'teddy-bear-blue'})
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['slug'], ["Product's Slug not unique."])
    def test_override_unique_together_message(self):
        # The unique_together message is customized under NON_FIELD_ERRORS.
        class CustomPriceForm(PriceForm):
            class Meta(PriceForm.Meta):
                error_messages = {
                    NON_FIELD_ERRORS: {
                        'unique_together': "%(model_name)s's %(field_labels)s not unique.",
                    }
                }
        Price.objects.create(price=6.00, quantity=1)
        form = CustomPriceForm({'price': '6.00', 'quantity': '1'})
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors[NON_FIELD_ERRORS], ["Price's Price and Quantity not unique."])
    def test_override_unique_for_date_message(self):
        # The unique_for_date message also supports %(date_field_label)s.
        class CustomPostForm(PostForm):
            class Meta(PostForm.Meta):
                error_messages = {
                    'title': {
                        'unique_for_date': "%(model_name)s's %(field_label)s not unique for %(date_field_label)s date.",
                    }
                }
        Post.objects.create(title="Django 1.0 is released",
            slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
        form = CustomPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['title'], ["Post's Title not unique for Posted date."])
class ModelToDictTests(TestCase):
    """
    Tests for forms.models.model_to_dict

    The two tests previously duplicated ~20 lines of identical fixture
    setup; it now lives in a single private helper.
    """
    def _create_article_with_categories(self):
        """Create three categories, a writer, and an article linked to all
        three categories; return ``(categories, article)``."""
        categories = [
            Category.objects.create(name='TestName1', slug='TestName1', url='url1'),
            Category.objects.create(name='TestName2', slug='TestName2', url='url2'),
            Category.objects.create(name='TestName3', slug='TestName3', url='url3'),
        ]
        writer = Writer.objects.create(name='Test writer')
        art = Article.objects.create(
            headline='Test article',
            slug='test-article',
            pub_date=datetime.date(1988, 1, 4),
            writer=writer,
            article='Hello.'
        )
        for c in categories:
            art.categories.add(c)
        return categories, art

    def test_model_to_dict_many_to_many(self):
        categories, art = self._create_article_with_categories()
        # Exactly one query is needed to fetch the m2m values.
        with self.assertNumQueries(1):
            d = model_to_dict(art)

        # Ensure all many-to-many categories appear in model_to_dict
        for c in categories:
            self.assertIn(c.pk, d['categories'])
        # Ensure many-to-many relation appears as a list
        self.assertIsInstance(d['categories'], list)

    def test_reuse_prefetched(self):
        # model_to_dict should not hit the database if it can reuse
        # the data populated by prefetch_related.
        categories, art = self._create_article_with_categories()
        art = Article.objects.prefetch_related('categories').get(pk=art.pk)

        with self.assertNumQueries(0):
            d = model_to_dict(art)

        # Ensure all many-to-many categories appear in model_to_dict
        for c in categories:
            self.assertIn(c.pk, d['categories'])
        # Ensure many-to-many relation appears as a list
        self.assertIsInstance(d['categories'], list)
class ModelFormBasicTests(TestCase):
def create_basic_data(self):
self.c1 = Category.objects.create(
name="Entertainment", slug="entertainment", url="entertainment")
self.c2 = Category.objects.create(
name="It's a test", slug="its-test", url="test")
self.c3 = Category.objects.create(
name="Third test", slug="third-test", url="third")
self.w_royko = Writer.objects.create(name='Mike Royko')
self.w_woodward = Writer.objects.create(name='Bob Woodward')
    def test_base_form(self):
        """An unbound BaseCategoryForm renders all three Category fields."""
        self.assertEqual(Category.objects.count(), 0)
        f = BaseCategoryForm()
        self.assertHTMLEqual(
            str(f),
            """<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>
<tr><th><label for="id_slug">Slug:</label></th><td><input id="id_slug" type="text" name="slug" maxlength="20" /></td></tr>
<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>"""
        )
        self.assertHTMLEqual(
            str(f.as_ul()),
            """<li><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="20" /></li>
<li><label for="id_slug">Slug:</label> <input id="id_slug" type="text" name="slug" maxlength="20" /></li>
<li><label for="id_url">The URL:</label> <input id="id_url" type="text" name="url" maxlength="40" /></li>"""
        )
        self.assertHTMLEqual(
            str(f["name"]),
            """<input id="id_name" type="text" name="name" maxlength="20" />""")
    def test_auto_id(self):
        """auto_id=False suppresses id attributes and <label> tags."""
        f = BaseCategoryForm(auto_id=False)
        self.assertHTMLEqual(
            str(f.as_ul()),
            """<li>Name: <input type="text" name="name" maxlength="20" /></li>
<li>Slug: <input type="text" name="slug" maxlength="20" /></li>
<li>The URL: <input type="text" name="url" maxlength="40" /></li>"""
        )
    def test_initial_values(self):
        """Initial data may come from the ``initial`` kwarg, from a model
        instance, or from bound data, and renders accordingly."""
        self.create_basic_data()
        # Initial values can be provided for model forms
        f = ArticleForm(
            auto_id=False,
            initial={
                'headline': 'Your headline here',
                'categories': [str(self.c1.id), str(self.c2.id)]
            })
        self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="Your headline here" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s" selected="selected">Entertainment</option>
<option value="%s" selected="selected">It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
        # When the ModelForm is passed an instance, that instance's current values are
        # inserted as 'initial' data in each Field.
        f = RoykoForm(auto_id=False, instance=self.w_royko)
        self.assertHTMLEqual(six.text_type(f), '''<tr><th>Name:</th><td><input type="text" name="name" value="Mike Royko" maxlength="50" /><br /><span class="helptext">Use both first and last names.</span></td></tr>''')
        art = Article.objects.create(
            headline='Test article',
            slug='test-article',
            pub_date=datetime.date(1988, 1, 4),
            writer=self.w_royko,
            article='Hello.'
        )
        art_id_1 = art.id
        # Instance-backed form: the article's values appear in the widgets.
        f = ArticleForm(auto_id=False, instance=art)
        self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="Test article" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="test-article" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>
<li>Writer: <select name="writer">
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected="selected">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article">Hello.</textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
        # Saving a bound, instance-backed form updates the same row.
        f = ArticleForm({
            'headline': 'Test headline',
            'slug': 'test-headline',
            'pub_date': '1984-02-06',
            'writer': six.text_type(self.w_royko.pk),
            'article': 'Hello.'
        }, instance=art)
        self.assertEqual(f.errors, {})
        self.assertTrue(f.is_valid())
        test_art = f.save()
        self.assertEqual(test_art.id, art_id_1)
        test_art = Article.objects.get(id=art_id_1)
        self.assertEqual(test_art.headline, 'Test headline')
def test_m2m_initial_callable(self):
"""
Regression for #10349: A callable can be provided as the initial value for an m2m field
"""
self.maxDiff = 1200
self.create_basic_data()
# Set up a callable initial value
def formfield_for_dbfield(db_field, **kwargs):
if db_field.name == 'categories':
kwargs['initial'] = lambda: Category.objects.all().order_by('name')[:2]
return db_field.formfield(**kwargs)
# Create a ModelForm, instantiate it, and check that the output is as expected
ModelForm = modelform_factory(Article, fields=['headline', 'categories'],
formfield_callback=formfield_for_dbfield)
form = ModelForm()
self.assertHTMLEqual(form.as_ul(), """<li><label for="id_headline">Headline:</label> <input id="id_headline" type="text" name="headline" maxlength="50" /></li>
<li><label for="id_categories">Categories:</label> <select multiple="multiple" name="categories" id="id_categories">
<option value="%d" selected="selected">Entertainment</option>
<option value="%d" selected="selected">It&39;s a test</option>
<option value="%d">Third test</option>
</select></li>"""
% (self.c1.pk, self.c2.pk, self.c3.pk))
def test_basic_creation(self):
self.assertEqual(Category.objects.count(), 0)
f = BaseCategoryForm({'name': 'Entertainment',
'slug': 'entertainment',
'url': 'entertainment'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['name'], 'Entertainment')
self.assertEqual(f.cleaned_data['slug'], 'entertainment')
self.assertEqual(f.cleaned_data['url'], 'entertainment')
c1 = f.save()
# Testing whether the same object is returned from the
# ORM... not the fastest way...
self.assertEqual(Category.objects.count(), 1)
self.assertEqual(c1, Category.objects.all()[0])
self.assertEqual(c1.name, "Entertainment")
def test_save_commit_false(self):
# If you call save() with commit=False, then it will return an object that
# hasn't yet been saved to the database. In this case, it's up to you to call
# save() on the resulting model instance.
f = BaseCategoryForm({'name': 'Third test', 'slug': 'third-test', 'url': 'third'})
self.assertTrue(f.is_valid())
c1 = f.save(commit=False)
self.assertEqual(c1.name, "Third test")
self.assertEqual(Category.objects.count(), 0)
c1.save()
self.assertEqual(Category.objects.count(), 1)
def test_save_with_data_errors(self):
# If you call save() with invalid data, you'll get a ValueError.
f = BaseCategoryForm({'name': '', 'slug': 'not a slug!', 'url': 'foo'})
self.assertEqual(f.errors['name'], ['This field is required.'])
self.assertEqual(f.errors['slug'], ["Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."])
self.assertEqual(f.cleaned_data, {'url': 'foo'})
with self.assertRaises(ValueError):
f.save()
f = BaseCategoryForm({'name': '', 'slug': '', 'url': 'foo'})
with self.assertRaises(ValueError):
f.save()
    def test_multi_fields(self):
        """Relational fields render as the expected choice widgets, and an
        instance's m2m selections are reflected in the output."""
        self.create_basic_data()
        self.maxDiff = None
        # ManyToManyFields are represented by a MultipleChoiceField, ForeignKeys and any
        # fields with the 'choices' attribute are represented by a ChoiceField.
        f = ArticleForm(auto_id=False)
        self.assertHTMLEqual(six.text_type(f), '''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" /></td></tr>
<tr><th>Slug:</th><td><input type="text" name="slug" maxlength="50" /></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" /></td></tr>
<tr><th>Writer:</th><td><select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></td></tr>
<tr><th>Article:</th><td><textarea rows="10" cols="40" name="article"></textarea></td></tr>
<tr><th>Categories:</th><td><select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select></td></tr>
<tr><th>Status:</th><td><select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></td></tr>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
        # Add some categories and test the many-to-many form output.
        new_art = Article.objects.create(
            article="Hello.", headline="New headline", slug="new-headline",
            pub_date=datetime.date(1988, 1, 4), writer=self.w_royko)
        new_art.categories.add(Category.objects.get(name='Entertainment'))
        self.assertQuerysetEqual(new_art.categories.all(), ["Entertainment"])
        f = ArticleForm(auto_id=False, instance=new_art)
        self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>
<li>Writer: <select name="writer">
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected="selected">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article">Hello.</textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s" selected="selected">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
    def test_subset_fields(self):
        """A form restricted via Meta.fields renders and saves only those fields."""
        # You can restrict a form to a subset of the complete list of fields
        # by providing a 'fields' argument. If you try to save a
        # model created with such a form, you need to ensure that the fields
        # that are _not_ on the form have default values, or are allowed to have
        # a value of None. If a field isn't specified on a form, the object created
        # from the form can't provide a value for that field!
        class PartialArticleForm(forms.ModelForm):
            class Meta:
                model = Article
                fields = ('headline', 'pub_date')
        f = PartialArticleForm(auto_id=False)
        self.assertHTMLEqual(six.text_type(f), '''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" /></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" /></td></tr>''')
        # You can create a form over a subset of the available fields
        # by specifying a 'fields' argument to form_for_instance.
        class PartialArticleFormWithSlug(forms.ModelForm):
            class Meta:
                model = Article
                fields = ('headline', 'slug', 'pub_date')
        w_royko = Writer.objects.create(name='Mike Royko')
        art = Article.objects.create(
            article="Hello.", headline="New headline", slug="new-headline",
            pub_date=datetime.date(1988, 1, 4), writer=w_royko)
        f = PartialArticleFormWithSlug({
            'headline': 'New headline',
            'slug': 'new-headline',
            'pub_date': '1988-01-04'
        }, auto_id=False, instance=art)
        self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>''')
        self.assertTrue(f.is_valid())
        # Saving updates the existing instance, not a new row.
        new_art = f.save()
        self.assertEqual(new_art.id, art.id)
        new_art = Article.objects.get(id=art.id)
        self.assertEqual(new_art.headline, 'New headline')
    def test_m2m_editing(self):
        """
        Many-to-many data is written by form.save(); with commit=False it is
        deferred until save_m2m() is called, and an empty submitted list
        clears existing relations.
        """
        self.create_basic_data()
        form_data = {
            'headline': 'New headline',
            'slug': 'new-headline',
            'pub_date': '1988-01-04',
            'writer': six.text_type(self.w_royko.pk),
            'article': 'Hello.',
            'categories': [six.text_type(self.c1.id), six.text_type(self.c2.id)]
        }
        # Create a new article, with categories, via the form.
        f = ArticleForm(form_data)
        new_art = f.save()
        new_art = Article.objects.get(id=new_art.id)
        art_id_1 = new_art.id
        self.assertQuerysetEqual(new_art.categories.order_by('name'),
            ["Entertainment", "It's a test"])
        # Now, submit form data with no categories. This deletes the existing categories.
        form_data['categories'] = []
        f = ArticleForm(form_data, instance=new_art)
        new_art = f.save()
        self.assertEqual(new_art.id, art_id_1)
        new_art = Article.objects.get(id=art_id_1)
        self.assertQuerysetEqual(new_art.categories.all(), [])
        # Create a new article, with no categories, via the form.
        f = ArticleForm(form_data)
        new_art = f.save()
        art_id_2 = new_art.id
        self.assertNotIn(art_id_2, (None, art_id_1))
        new_art = Article.objects.get(id=art_id_2)
        self.assertQuerysetEqual(new_art.categories.all(), [])
        # Create a new article, with categories, via the form, but use commit=False.
        # The m2m data won't be saved until save_m2m() is invoked on the form.
        form_data['categories'] = [six.text_type(self.c1.id), six.text_type(self.c2.id)]
        f = ArticleForm(form_data)
        new_art = f.save(commit=False)
        # Manually save the instance
        new_art.save()
        art_id_3 = new_art.id
        self.assertNotIn(art_id_3, (None, art_id_1, art_id_2))
        # The instance doesn't have m2m data yet
        new_art = Article.objects.get(id=art_id_3)
        self.assertQuerysetEqual(new_art.categories.all(), [])
        # Save the m2m data on the form
        f.save_m2m()
        self.assertQuerysetEqual(new_art.categories.order_by('name'),
            ["Entertainment", "It's a test"])
    def test_custom_form_fields(self):
        """
        Explicitly declared form fields override the model-derived ones, and
        save() still applies the cleaned values to the bound instance.
        """
        # Here, we define a custom ModelForm. Because it happens to have the same fields as
        # the Category model, we can just call the form's save() to apply its changes to an
        # existing Category instance.
        class ShortCategory(forms.ModelForm):
            name = forms.CharField(max_length=5)
            slug = forms.CharField(max_length=5)
            url = forms.CharField(max_length=3)
            class Meta:
                model = Category
                fields = '__all__'
        cat = Category.objects.create(name='Third test')
        form = ShortCategory({'name': 'Third', 'slug': 'third', 'url': '3rd'}, instance=cat)
        self.assertEqual(form.save().name, 'Third')
        # The change is persisted to the database, not just on the returned object.
        self.assertEqual(Category.objects.get(id=cat.id).name, 'Third')
    def test_runtime_choicefield_populated(self):
        """
        ForeignKey/M2M choices are read from the database each time the form
        is rendered, so rows created after form instantiation still appear.
        """
        self.maxDiff = None
        # Here, we demonstrate that choices for a ForeignKey ChoiceField are determined
        # at runtime, based on the data in the database when the form is displayed, not
        # the data in the database when the form is instantiated.
        self.create_basic_data()
        f = ArticleForm(auto_id=False)
        self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select> </li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
        # Objects created *after* the form was instantiated show up when the
        # same form instance is rendered again.
        c4 = Category.objects.create(name='Fourth', url='4th')
        w_bernstein = Writer.objects.create(name='Carl Bernstein')
        self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Carl Bernstein</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
<option value="%s">Fourth</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, w_bernstein.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk, c4.pk))
class ModelChoiceFieldTests(TestCase):
    """Tests for forms.ModelChoiceField: choice generation, clean(), and widgets."""
    def setUp(self):
        # Three shared categories; their pks are referenced in expected choice lists.
        self.c1 = Category.objects.create(
            name="Entertainment", slug="entertainment", url="entertainment")
        self.c2 = Category.objects.create(
            name="It's a test", slug="its-test", url="test")
        self.c3 = Category.objects.create(
            name="Third", slug="third-test", url="third")
    # ModelChoiceField ############################################################
    def test_modelchoicefield(self):
        """
        clean() accepts valid pks, rejects empty/invalid values, and queries
        the database at clean() time instead of caching the queryset.
        """
        f = forms.ModelChoiceField(Category.objects.all())
        self.assertEqual(list(f.choices), [
            ('', '---------'),
            (self.c1.pk, 'Entertainment'),
            (self.c2.pk, "It's a test"),
            (self.c3.pk, 'Third')])
        with self.assertRaises(ValidationError):
            f.clean('')
        with self.assertRaises(ValidationError):
            f.clean(None)
        with self.assertRaises(ValidationError):
            f.clean(0)
        # Invalid types that require TypeError to be caught (#22808).
        with self.assertRaises(ValidationError):
            f.clean([['fail']])
        with self.assertRaises(ValidationError):
            f.clean([{'foo': 'bar'}])
        self.assertEqual(f.clean(self.c2.id).name, "It's a test")
        self.assertEqual(f.clean(self.c3.id).name, 'Third')
        # Add a Category object *after* the ModelChoiceField has already been
        # instantiated. This proves clean() checks the database during clean() rather
        # than caching it at time of instantiation.
        c4 = Category.objects.create(name='Fourth', url='4th')
        self.assertEqual(f.clean(c4.id).name, 'Fourth')
        # Delete a Category object *after* the ModelChoiceField has already been
        # instantiated. This proves clean() checks the database during clean() rather
        # than caching it at time of instantiation.
        Category.objects.get(url='4th').delete()
        with self.assertRaises(ValidationError):
            f.clean(c4.id)
    def test_modelchoicefield_choices(self):
        """
        The queryset (and label_from_instance) can be replaced after field
        creation, and choices can be iterated repeatedly.
        """
        f = forms.ModelChoiceField(Category.objects.filter(pk=self.c1.id), required=False)
        self.assertIsNone(f.clean(''))
        self.assertEqual(f.clean(str(self.c1.id)).name, "Entertainment")
        with self.assertRaises(ValidationError):
            f.clean('100')
        # len can be called on choices
        self.assertEqual(len(f.choices), 2)
        # queryset can be changed after the field is created.
        f.queryset = Category.objects.exclude(name='Third')
        self.assertEqual(list(f.choices), [
            ('', '---------'),
            (self.c1.pk, 'Entertainment'),
            (self.c2.pk, "It's a test")])
        self.assertEqual(f.clean(self.c2.id).name, "It's a test")
        with self.assertRaises(ValidationError):
            f.clean(self.c3.id)
        # check that we can safely iterate choices repeatedly
        gen_one = list(f.choices)
        gen_two = f.choices
        self.assertEqual(gen_one[2], (self.c2.pk, "It's a test"))
        self.assertEqual(list(gen_two), [
            ('', '---------'),
            (self.c1.pk, 'Entertainment'),
            (self.c2.pk, "It's a test")])
        # check that we can override the label_from_instance method to print custom labels (#4620)
        f.queryset = Category.objects.all()
        f.label_from_instance = lambda obj: "category " + str(obj)
        self.assertEqual(list(f.choices), [
            ('', '---------'),
            (self.c1.pk, 'category Entertainment'),
            (self.c2.pk, "category It's a test"),
            (self.c3.pk, 'category Third')])
    def test_modelchoicefield_11183(self):
        """
        Regression test for ticket #11183.
        """
        class ModelChoiceForm(forms.Form):
            category = forms.ModelChoiceField(Category.objects.all())
        form1 = ModelChoiceForm()
        field1 = form1.fields['category']
        # To allow the widget to change the queryset of field1.widget.choices correctly,
        # without affecting other forms, the following must hold:
        self.assertIsNot(field1, ModelChoiceForm.base_fields['category'])
        self.assertIs(field1.widget.choices.field, field1)
    def test_modelchoicefield_22745(self):
        """
        #22745 -- Make sure that ModelChoiceField with RadioSelect widget
        doesn't produce unnecessary db queries when accessing its BoundField's
        attrs.
        """
        class ModelChoiceForm(forms.Form):
            category = forms.ModelChoiceField(Category.objects.all(), widget=forms.RadioSelect)
        form = ModelChoiceForm()
        field = form['category']  # BoundField
        template = Template('{{ field.name }}{{ field }}{{ field.help_text }}')
        with self.assertNumQueries(1):
            template.render(Context({'field': field}))
class ModelMultipleChoiceFieldTests(TestCase):
    """Tests for forms.ModelMultipleChoiceField: clean(), query counts, widgets."""
    def setUp(self):
        # Three shared categories; their pks are referenced in expected choice lists.
        self.c1 = Category.objects.create(
            name="Entertainment", slug="entertainment", url="entertainment")
        self.c2 = Category.objects.create(
            name="It's a test", slug="its-test", url="test")
        self.c3 = Category.objects.create(
            name="Third", slug="third-test", url="third")
    def test_model_multiple_choice_field(self):
        """
        clean() accepts lists/tuples of pks (int or str), rejects empty or
        invalid input, and checks the database at clean() time.
        """
        f = forms.ModelMultipleChoiceField(Category.objects.all())
        self.assertEqual(list(f.choices), [
            (self.c1.pk, 'Entertainment'),
            (self.c2.pk, "It's a test"),
            (self.c3.pk, 'Third')])
        with self.assertRaises(ValidationError):
            f.clean(None)
        with self.assertRaises(ValidationError):
            f.clean([])
        self.assertQuerysetEqual(f.clean([self.c1.id]), ["Entertainment"])
        self.assertQuerysetEqual(f.clean([self.c2.id]), ["It's a test"])
        self.assertQuerysetEqual(f.clean([str(self.c1.id)]), ["Entertainment"])
        self.assertQuerysetEqual(f.clean([str(self.c1.id), str(self.c2.id)]),
            ["Entertainment", "It's a test"], ordered=False)
        self.assertQuerysetEqual(f.clean([self.c1.id, str(self.c2.id)]),
            ["Entertainment", "It's a test"], ordered=False)
        self.assertQuerysetEqual(f.clean((self.c1.id, str(self.c2.id))),
            ["Entertainment", "It's a test"], ordered=False)
        with self.assertRaises(ValidationError):
            f.clean(['100'])
        with self.assertRaises(ValidationError):
            f.clean('hello')
        with self.assertRaises(ValidationError):
            f.clean(['fail'])
        # Invalid types that require TypeError to be caught (#22808).
        with self.assertRaises(ValidationError):
            f.clean([['fail']])
        with self.assertRaises(ValidationError):
            f.clean([{'foo': 'bar'}])
        # Add a Category object *after* the ModelMultipleChoiceField has already been
        # instantiated. This proves clean() checks the database during clean() rather
        # than caching it at time of instantiation.
        # Note, we are using an id of 1006 here since tests that run before
        # this may create categories with primary keys up to 6. Use
        # a number that will not conflict.
        c6 = Category.objects.create(id=1006, name='Sixth', url='6th')
        self.assertQuerysetEqual(f.clean([c6.id]), ["Sixth"])
        # Delete a Category object *after* the ModelMultipleChoiceField has already been
        # instantiated. This proves clean() checks the database during clean() rather
        # than caching it at time of instantiation.
        Category.objects.get(url='6th').delete()
        with self.assertRaises(ValidationError):
            f.clean([c6.id])
    def test_model_multiple_choice_required_false(self):
        """
        A non-required field cleans empty input to an empty queryset; any
        invalid pk in the list still fails validation.
        """
        f = forms.ModelMultipleChoiceField(Category.objects.all(), required=False)
        self.assertIsInstance(f.clean([]), EmptyQuerySet)
        self.assertIsInstance(f.clean(()), EmptyQuerySet)
        with self.assertRaises(ValidationError):
            f.clean(['0'])
        with self.assertRaises(ValidationError):
            f.clean([str(self.c3.id), '0'])
        with self.assertRaises(ValidationError):
            f.clean([str(self.c1.id), '0'])
        # queryset can be changed after the field is created.
        f.queryset = Category.objects.exclude(name='Third')
        self.assertEqual(list(f.choices), [
            (self.c1.pk, 'Entertainment'),
            (self.c2.pk, "It's a test")])
        self.assertQuerysetEqual(f.clean([self.c2.id]), ["It's a test"])
        with self.assertRaises(ValidationError):
            f.clean([self.c3.id])
        with self.assertRaises(ValidationError):
            f.clean([str(self.c2.id), str(self.c3.id)])
        # label_from_instance can be overridden for custom labels.
        f.queryset = Category.objects.all()
        f.label_from_instance = lambda obj: "multicategory " + str(obj)
        self.assertEqual(list(f.choices), [
            (self.c1.pk, 'multicategory Entertainment'),
            (self.c2.pk, "multicategory It's a test"),
            (self.c3.pk, 'multicategory Third')])
    def test_model_multiple_choice_number_of_queries(self):
        """
        Test that ModelMultipleChoiceField does O(1) queries instead of
        O(n) (#10156).
        """
        persons = [Writer.objects.create(name="Person %s" % i) for i in range(30)]
        f = forms.ModelMultipleChoiceField(queryset=Writer.objects.all())
        self.assertNumQueries(1, f.clean, [p.pk for p in persons[1:11:2]])
    def test_model_multiple_choice_run_validators(self):
        """
        Test that ModelMultipleChoiceField run given validators (#14144).
        """
        for i in range(30):
            Writer.objects.create(name="Person %s" % i)
        self._validator_run = False
        def my_validator(value):
            # Side-effect flag proves the validator was invoked during clean().
            self._validator_run = True
        f = forms.ModelMultipleChoiceField(queryset=Writer.objects.all(),
            validators=[my_validator])
        f.clean([p.pk for p in Writer.objects.all()[8:9]])
        self.assertTrue(self._validator_run)
    def test_model_multiple_choice_show_hidden_initial(self):
        """
        Test support of show_hidden_initial by ModelMultipleChoiceField.
        """
        class WriterForm(forms.Form):
            persons = forms.ModelMultipleChoiceField(show_hidden_initial=True,
                queryset=Writer.objects.all())
        person1 = Writer.objects.create(name="Person 1")
        person2 = Writer.objects.create(name="Person 2")
        # Identical initial and submitted data -> no change detected.
        form = WriterForm(initial={'persons': [person1, person2]},
            data={'initial-persons': [str(person1.pk), str(person2.pk)],
                'persons': [str(person1.pk), str(person2.pk)]})
        self.assertTrue(form.is_valid())
        self.assertFalse(form.has_changed())
        # Dropping one selection -> change detected.
        form = WriterForm(initial={'persons': [person1, person2]},
            data={'initial-persons': [str(person1.pk), str(person2.pk)],
                'persons': [str(person2.pk)]})
        self.assertTrue(form.is_valid())
        self.assertTrue(form.has_changed())
    def test_model_multiple_choice_field_22745(self):
        """
        #22745 -- Make sure that ModelMultipleChoiceField with
        CheckboxSelectMultiple widget doesn't produce unnecessary db queries
        when accessing its BoundField's attrs.
        """
        class ModelMultipleChoiceForm(forms.Form):
            categories = forms.ModelMultipleChoiceField(Category.objects.all(), widget=forms.CheckboxSelectMultiple)
        form = ModelMultipleChoiceForm()
        field = form['categories']  # BoundField
        template = Template('{{ field.name }}{{ field }}{{ field.help_text }}')
        with self.assertNumQueries(1):
            template.render(Context({'field': field}))
    def test_show_hidden_initial_changed_queries_efficiently(self):
        """has_changed() with show_hidden_initial uses a single query for many pks."""
        class WriterForm(forms.Form):
            persons = forms.ModelMultipleChoiceField(
                show_hidden_initial=True, queryset=Writer.objects.all())
        writers = (Writer.objects.create(name=str(x)) for x in range(0, 50))
        writer_pks = tuple(x.pk for x in writers)
        form = WriterForm(data={'initial-persons': writer_pks})
        with self.assertNumQueries(1):
            self.assertTrue(form.has_changed())
    def test_clean_does_deduplicate_values(self):
        """Duplicate submitted pks collapse to a single query parameter."""
        class WriterForm(forms.Form):
            persons = forms.ModelMultipleChoiceField(queryset=Writer.objects.all())
        person1 = Writer.objects.create(name="Person 1")
        form = WriterForm(data={})
        queryset = form.fields['persons'].clean([str(person1.pk)] * 50)
        sql, params = queryset.query.sql_with_params()
        self.assertEqual(len(params), 1)
class ModelOneToOneFieldTests(TestCase):
    """Tests for ModelForms over models with OneToOneField relations."""
    def test_modelform_onetoonefield(self):
        """
        A parent-link OneToOneField is excluded from the generated form
        fields; a plain OneToOneField is included.
        """
        class ImprovedArticleForm(forms.ModelForm):
            class Meta:
                model = ImprovedArticle
                fields = '__all__'
        class ImprovedArticleWithParentLinkForm(forms.ModelForm):
            class Meta:
                model = ImprovedArticleWithParentLink
                fields = '__all__'
        self.assertEqual(list(ImprovedArticleForm.base_fields), ['article'])
        self.assertEqual(list(ImprovedArticleWithParentLinkForm.base_fields), [])
    def test_modelform_subclassed_model(self):
        """A ModelForm over a subclassed model saves the subclass's extra fields."""
        class BetterWriterForm(forms.ModelForm):
            class Meta:
                # BetterWriter model is a subclass of Writer with an additional `score` field
                model = BetterWriter
                fields = '__all__'
        bw = BetterWriter.objects.create(name='Joe Better', score=10)
        self.assertEqual(sorted(model_to_dict(bw)),
            ['id', 'name', 'score', 'writer_ptr'])
        form = BetterWriterForm({'name': 'Some Name', 'score': 12})
        self.assertTrue(form.is_valid())
        bw2 = form.save()
        self.assertEqual(bw2.score, 12)
    def test_onetoonefield(self):
        """A OneToOneField renders as a select and round-trips through save()."""
        class WriterProfileForm(forms.ModelForm):
            class Meta:
                # WriterProfile has a OneToOneField to Writer
                model = WriterProfile
                fields = '__all__'
        self.w_royko = Writer.objects.create(name='Mike Royko')
        self.w_woodward = Writer.objects.create(name='Bob Woodward')
        form = WriterProfileForm()
        self.assertHTMLEqual(form.as_p(), '''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="number" name="age" id="id_age" min="0" /></p>''' % (self.w_woodward.pk, self.w_royko.pk))
        data = {
            'writer': six.text_type(self.w_woodward.pk),
            'age': '65',
        }
        form = WriterProfileForm(data)
        instance = form.save()
        self.assertEqual(six.text_type(instance), 'Bob Woodward is 65')
        # Re-rendering from the saved instance pre-selects the stored writer.
        form = WriterProfileForm(instance=instance)
        self.assertHTMLEqual(form.as_p(), '''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer">
<option value="">---------</option>
<option value="%s" selected="selected">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="number" name="age" value="65" id="id_age" min="0" /></p>''' % (self.w_woodward.pk, self.w_royko.pk))
    def test_assignment_of_none(self):
        """Submitting an empty value clears a nullable OneToOneField."""
        class AuthorForm(forms.ModelForm):
            class Meta:
                model = Author
                fields = ['publication', 'full_name']
        publication = Publication.objects.create(title="Pravda",
            date_published=datetime.date(1991, 8, 22))
        author = Author.objects.create(publication=publication, full_name='John Doe')
        form = AuthorForm({'publication': '', 'full_name': 'John Doe'}, instance=author)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['publication'], None)
        author = form.save()
        # author object returned from form still retains original publication object
        # that's why we need to retrieve it from database again
        new_author = Author.objects.get(pk=author.pk)
        self.assertEqual(new_author.publication, None)
    def test_assignment_of_none_null_false(self):
        """An empty value fails validation when the OneToOneField is not nullable."""
        class AuthorForm(forms.ModelForm):
            class Meta:
                model = Author1
                fields = ['publication', 'full_name']
        publication = Publication.objects.create(title="Pravda",
            date_published=datetime.date(1991, 8, 22))
        author = Author1.objects.create(publication=publication, full_name='John Doe')
        form = AuthorForm({'publication': '', 'full_name': 'John Doe'}, instance=author)
        self.assertFalse(form.is_valid())
class FileAndImageFieldTests(TestCase):
    """Tests for FileField/ImageField behavior in forms and ModelForms."""
    def test_clean_false(self):
        """
        If the ``clean`` method on a non-required FileField receives False as
        the data (meaning clear the field value), it returns False, regardless
        of the value of ``initial``.
        """
        f = forms.FileField(required=False)
        self.assertEqual(f.clean(False), False)
        self.assertEqual(f.clean(False, 'initial'), False)
    def test_clean_false_required(self):
        """
        If the ``clean`` method on a required FileField receives False as the
        data, it has the same effect as None: initial is returned if non-empty,
        otherwise the validation catches the lack of a required value.
        """
        f = forms.FileField(required=True)
        self.assertEqual(f.clean(False, 'initial'), 'initial')
        self.assertRaises(ValidationError, f.clean, False)
    def test_full_clear(self):
        """
        Integration happy-path test that a model FileField can actually be set
        and cleared via a ModelForm.
        """
        class DocumentForm(forms.ModelForm):
            class Meta:
                model = Document
                fields = '__all__'
        # Unbound form with no file set: no clear checkbox rendered.
        form = DocumentForm()
        self.assertIn('name="myfile"', six.text_type(form))
        self.assertNotIn('myfile-clear', six.text_type(form))
        form = DocumentForm(files={'myfile': SimpleUploadedFile('something.txt', b'content')})
        self.assertTrue(form.is_valid())
        doc = form.save(commit=False)
        self.assertEqual(doc.myfile.name, 'something.txt')
        # Once a file is set, the clear checkbox appears and can clear it.
        form = DocumentForm(instance=doc)
        self.assertIn('myfile-clear', six.text_type(form))
        form = DocumentForm(instance=doc, data={'myfile-clear': 'true'})
        doc = form.save(commit=False)
        self.assertEqual(bool(doc.myfile), False)
    def test_clear_and_file_contradiction(self):
        """
        If the user submits a new file upload AND checks the clear checkbox,
        they get a validation error, and the bound redisplay of the form still
        includes the current file and the clear checkbox.
        """
        class DocumentForm(forms.ModelForm):
            class Meta:
                model = Document
                fields = '__all__'
        form = DocumentForm(files={'myfile': SimpleUploadedFile('something.txt', b'content')})
        self.assertTrue(form.is_valid())
        doc = form.save(commit=False)
        form = DocumentForm(instance=doc,
            files={'myfile': SimpleUploadedFile('something.txt', b'content')},
            data={'myfile-clear': 'true'})
        self.assertTrue(not form.is_valid())
        self.assertEqual(form.errors['myfile'],
            ['Please either submit a file or check the clear checkbox, not both.'])
        rendered = six.text_type(form)
        self.assertIn('something.txt', rendered)
        self.assertIn('myfile-clear', rendered)
    def test_file_field_data(self):
        """
        Uploading, re-saving without re-uploading, and overriding a FileField
        all behave; max_length is inherited from the model field.
        """
        # Test conditions when files is either not given or empty.
        f = TextFileForm(data={'description': 'Assistance'})
        self.assertFalse(f.is_valid())
        f = TextFileForm(data={'description': 'Assistance'}, files={})
        self.assertFalse(f.is_valid())
        # Upload a file and ensure it all works as expected.
        f = TextFileForm(
            data={'description': 'Assistance'},
            files={'file': SimpleUploadedFile('test1.txt', b'hello world')})
        self.assertTrue(f.is_valid())
        self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
        instance = f.save()
        self.assertEqual(instance.file.name, 'tests/test1.txt')
        instance.file.delete()
        # If the previous file has been deleted, the file name can be reused
        f = TextFileForm(
            data={'description': 'Assistance'},
            files={'file': SimpleUploadedFile('test1.txt', b'hello world')})
        self.assertTrue(f.is_valid())
        self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
        instance = f.save()
        self.assertEqual(instance.file.name, 'tests/test1.txt')
        # Check if the max_length attribute has been inherited from the model.
        f = TextFileForm(
            data={'description': 'Assistance'},
            files={'file': SimpleUploadedFile('test-maxlength.txt', b'hello world')})
        self.assertFalse(f.is_valid())
        # Edit an instance that already has the file defined in the model. This will not
        # save the file again, but leave it exactly as it is.
        f = TextFileForm(
            data={'description': 'Assistance'},
            instance=instance)
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data['file'].name, 'tests/test1.txt')
        instance = f.save()
        self.assertEqual(instance.file.name, 'tests/test1.txt')
        # Delete the current file since this is not done by Django.
        instance.file.delete()
        # Override the file by uploading a new one.
        f = TextFileForm(
            data={'description': 'Assistance'},
            files={'file': SimpleUploadedFile('test2.txt', b'hello world')}, instance=instance)
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.file.name, 'tests/test2.txt')
        # Delete the current file since this is not done by Django.
        instance.file.delete()
        instance.delete()
    def test_filefield_required_false(self):
        """A non-required FileField allows saving with no file; an existing file is preserved on later edits."""
        # Test the non-required FileField
        f = TextFileForm(data={'description': 'Assistance'})
        f.fields['file'].required = False
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.file.name, '')
        f = TextFileForm(
            data={'description': 'Assistance'},
            files={'file': SimpleUploadedFile('test3.txt', b'hello world')}, instance=instance)
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.file.name, 'tests/test3.txt')
        # Instance can be edited w/out re-uploading the file and existing file should be preserved.
        f = TextFileForm(
            data={'description': 'New Description'},
            instance=instance)
        f.fields['file'].required = False
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.description, 'New Description')
        self.assertEqual(instance.file.name, 'tests/test3.txt')
        # Delete the current file since this is not done by Django.
        instance.file.delete()
        instance.delete()
    def test_custom_file_field_save(self):
        """
        Regression for #11149: save_form_data should be called only once
        """
        class CFFForm(forms.ModelForm):
            class Meta:
                model = CustomFF
                fields = '__all__'
        # It's enough that the form saves without error -- the custom save routine will
        # generate an AssertionError if it is called more than once during save.
        form = CFFForm(data={'f': None})
        form.save()
    def test_file_field_multiple_save(self):
        """
        Simulate a file upload and check how many times Model.save() gets
        called. Test for bug #639.
        """
        class PhotoForm(forms.ModelForm):
            class Meta:
                model = Photo
                fields = '__all__'
        # Grab an image for testing.
        filename = os.path.join(os.path.dirname(upath(__file__)), "test.png")
        with open(filename, "rb") as fp:
            img = fp.read()
        # Fake a POST QueryDict and FILES MultiValueDict.
        data = {'title': 'Testing'}
        files = {"image": SimpleUploadedFile('test.png', img, 'image/png')}
        form = PhotoForm(data=data, files=files)
        p = form.save()
        try:
            # Check the savecount stored on the object (see the model).
            self.assertEqual(p._savecount, 1)
        finally:
            # Delete the "uploaded" file to avoid clogging /tmp.
            p = Photo.objects.get()
            p.image.delete(save=False)
    def test_file_path_field_blank(self):
        """
        Regression test for #8842: FilePathField(blank=True)
        """
        class FPForm(forms.ModelForm):
            class Meta:
                model = FilePathModel
                fields = '__all__'
        form = FPForm()
        names = [p[1] for p in form['path'].field.choices]
        names.sort()
        self.assertEqual(names, ['---------', '__init__.py', 'models.py', 'tests.py'])
    @skipUnless(test_images, "Pillow not installed")
    def test_image_field(self):
        """
        ImageField uploads populate the model's width/height dimension
        fields; re-saving, overriding, clearing, and callable upload_to
        all behave as for FileField.
        """
        # ImageField and FileField are nearly identical, but they differ slightly when
        # it comes to validation. This specifically tests that #6302 is fixed for
        # both file fields and image fields.
        with open(os.path.join(os.path.dirname(upath(__file__)), "test.png"), 'rb') as fp:
            image_data = fp.read()
        with open(os.path.join(os.path.dirname(upath(__file__)), "test2.png"), 'rb') as fp:
            image_data2 = fp.read()
        f = ImageFileForm(
            data={'description': 'An image'},
            files={'image': SimpleUploadedFile('test.png', image_data)})
        self.assertTrue(f.is_valid())
        self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test.png')
        self.assertEqual(instance.width, 16)
        self.assertEqual(instance.height, 16)
        # Delete the current file since this is not done by Django, but don't save
        # because the dimension fields are not null=True.
        instance.image.delete(save=False)
        f = ImageFileForm(
            data={'description': 'An image'},
            files={'image': SimpleUploadedFile('test.png', image_data)})
        self.assertTrue(f.is_valid())
        self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test.png')
        self.assertEqual(instance.width, 16)
        self.assertEqual(instance.height, 16)
        # Edit an instance that already has the (required) image defined in the model. This will not
        # save the image again, but leave it exactly as it is.
        f = ImageFileForm(data={'description': 'Look, it changed'}, instance=instance)
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data['image'].name, 'tests/test.png')
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test.png')
        self.assertEqual(instance.height, 16)
        self.assertEqual(instance.width, 16)
        # Delete the current file since this is not done by Django, but don't save
        # because the dimension fields are not null=True.
        instance.image.delete(save=False)
        # Override the file by uploading a new one.
        f = ImageFileForm(
            data={'description': 'Changed it'},
            files={'image': SimpleUploadedFile('test2.png', image_data2)}, instance=instance)
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test2.png')
        self.assertEqual(instance.height, 32)
        self.assertEqual(instance.width, 48)
        # Delete the current file since this is not done by Django, but don't save
        # because the dimension fields are not null=True.
        instance.image.delete(save=False)
        instance.delete()
        f = ImageFileForm(
            data={'description': 'Changed it'},
            files={'image': SimpleUploadedFile('test2.png', image_data2)})
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test2.png')
        self.assertEqual(instance.height, 32)
        self.assertEqual(instance.width, 48)
        # Delete the current file since this is not done by Django, but don't save
        # because the dimension fields are not null=True.
        instance.image.delete(save=False)
        instance.delete()
        # Test the non-required ImageField
        # Note: In Oracle, we expect a null ImageField to return '' instead of
        # None.
        if connection.features.interprets_empty_strings_as_nulls:
            expected_null_imagefield_repr = ''
        else:
            expected_null_imagefield_repr = None
        f = OptionalImageFileForm(data={'description': 'Test'})
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, expected_null_imagefield_repr)
        self.assertEqual(instance.width, None)
        self.assertEqual(instance.height, None)
        f = OptionalImageFileForm(
            data={'description': 'And a final one'},
            files={'image': SimpleUploadedFile('test3.png', image_data)}, instance=instance)
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test3.png')
        self.assertEqual(instance.width, 16)
        self.assertEqual(instance.height, 16)
        # Editing the instance without re-uploading the image should not affect the image or its width/height properties
        f = OptionalImageFileForm(
            data={'description': 'New Description'},
            instance=instance)
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.description, 'New Description')
        self.assertEqual(instance.image.name, 'tests/test3.png')
        self.assertEqual(instance.width, 16)
        self.assertEqual(instance.height, 16)
        # Delete the current file since this is not done by Django.
        instance.image.delete()
        instance.delete()
        f = OptionalImageFileForm(
            data={'description': 'And a final one'},
            files={'image': SimpleUploadedFile('test4.png', image_data2)}
        )
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test4.png')
        self.assertEqual(instance.width, 48)
        self.assertEqual(instance.height, 32)
        instance.delete()
        # Test callable upload_to behavior that's dependent on the value of another field in the model
        f = ImageFileForm(
            data={'description': 'And a final one', 'path': 'foo'},
            files={'image': SimpleUploadedFile('test4.png', image_data)})
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, 'foo/test4.png')
        instance.delete()
class ModelOtherFieldTests(SimpleTestCase):
    """Form behavior for model fields that need special validation handling."""
    def test_big_integer_field(self):
        # BigIntegerField bounds are -2**63 .. 2**63 - 1; a value one past
        # either bound must fail with the corresponding range message.
        bif = BigIntForm({'biggie': '-9223372036854775808'})
        self.assertTrue(bif.is_valid())
        bif = BigIntForm({'biggie': '-9223372036854775809'})
        self.assertFalse(bif.is_valid())
        self.assertEqual(bif.errors, {'biggie': ['Ensure this value is greater than or equal to -9223372036854775808.']})
        bif = BigIntForm({'biggie': '9223372036854775807'})
        self.assertTrue(bif.is_valid())
        bif = BigIntForm({'biggie': '9223372036854775808'})
        self.assertFalse(bif.is_valid())
        self.assertEqual(bif.errors, {'biggie': ['Ensure this value is less than or equal to 9223372036854775807.']})
    def test_comma_separated_integer_field(self):
        # Only digits separated by single commas are accepted; letters,
        # decimal points, empty items and bare commas are all rejected.
        class CommaSeparatedIntegerForm(forms.ModelForm):
            class Meta:
                model = CommaSeparatedInteger
                fields = '__all__'
        f = CommaSeparatedIntegerForm({'field': '1'})
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data, {'field': '1'})
        f = CommaSeparatedIntegerForm({'field': '12'})
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data, {'field': '12'})
        f = CommaSeparatedIntegerForm({'field': '1,2,3'})
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data, {'field': '1,2,3'})
        f = CommaSeparatedIntegerForm({'field': '10,32'})
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data, {'field': '10,32'})
        f = CommaSeparatedIntegerForm({'field': '1a,2'})
        self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
        f = CommaSeparatedIntegerForm({'field': ',,,,'})
        self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
        f = CommaSeparatedIntegerForm({'field': '1.2'})
        self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
        f = CommaSeparatedIntegerForm({'field': '1,a,2'})
        self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
        f = CommaSeparatedIntegerForm({'field': '1,,2'})
        self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
    def test_url_on_modelform(self):
        "Check basic URL field validation on model forms"
        class HomepageForm(forms.ModelForm):
            class Meta:
                model = Homepage
                fields = '__all__'
        # Invalid: no scheme/host, empty host, missing or trailing-dot TLDs.
        self.assertFalse(HomepageForm({'url': 'foo'}).is_valid())
        self.assertFalse(HomepageForm({'url': 'http://'}).is_valid())
        self.assertFalse(HomepageForm({'url': 'http://example'}).is_valid())
        self.assertFalse(HomepageForm({'url': 'http://example.'}).is_valid())
        self.assertFalse(HomepageForm({'url': 'http://com.'}).is_valid())
        # Valid: localhost, domains, ports, and paths are all accepted.
        self.assertTrue(HomepageForm({'url': 'http://localhost'}).is_valid())
        self.assertTrue(HomepageForm({'url': 'http://example.com'}).is_valid())
        self.assertTrue(HomepageForm({'url': 'http://www.example.com'}).is_valid())
        self.assertTrue(HomepageForm({'url': 'http://www.example.com:8000'}).is_valid())
        self.assertTrue(HomepageForm({'url': 'http://www.example.com/test'}).is_valid())
        self.assertTrue(HomepageForm({'url': 'http://www.example.com:8000/test'}).is_valid())
        self.assertTrue(HomepageForm({'url': 'http://example.com/foo/bar'}).is_valid())
    def test_http_prefixing(self):
        """
        If the http:// prefix is omitted on form input, the field adds it again. (Refs #13613)
        """
        class HomepageForm(forms.ModelForm):
            class Meta:
                model = Homepage
                fields = '__all__'
        form = HomepageForm({'url': 'example.com'})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['url'], 'http://example.com')
        form = HomepageForm({'url': 'example.com/test'})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['url'], 'http://example.com/test')
class OtherModelFormTests(TestCase):
    """Assorted ModelForm behaviors: media, choices, to_field_name, defaults."""
    def test_media_on_modelform(self):
        # Similar to a regular Form class you can define custom media to be used on
        # the ModelForm.
        f = ModelFormWithMedia()
        self.assertHTMLEqual(six.text_type(f.media), '''<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/form/javascript"></script>''')
    def test_choices_type(self):
        # Choices on CharField and IntegerField
        # Values outside the declared choices must raise ValidationError.
        f = ArticleForm()
        with self.assertRaises(ValidationError):
            f.fields['status'].clean('42')
        f = ArticleStatusForm()
        with self.assertRaises(ValidationError):
            f.fields['status'].clean('z')
    def test_foreignkeys_which_use_to_field(self):
        # When the FK uses to_field, choice values are the target field
        # (barcode) rather than the primary key.
        apple = Inventory.objects.create(barcode=86, name='Apple')
        Inventory.objects.create(barcode=22, name='Pear')
        core = Inventory.objects.create(barcode=87, name='Core', parent=apple)
        field = forms.ModelChoiceField(Inventory.objects.all(), to_field_name='barcode')
        self.assertEqual(tuple(field.choices), (
            ('', '---------'),
            (86, 'Apple'),
            (87, 'Core'),
            (22, 'Pear')))
        form = InventoryForm(instance=core)
        self.assertHTMLEqual(six.text_type(form['parent']), '''<select name="parent" id="id_parent">
<option value="">---------</option>
<option value="86" selected="selected">Apple</option>
<option value="87">Core</option>
<option value="22">Pear</option>
</select>''')
        # Saving with a barcode value should resolve to the matching instance.
        data = model_to_dict(core)
        data['parent'] = '22'
        form = InventoryForm(data=data, instance=core)
        core = form.save()
        self.assertEqual(core.parent.name, 'Pear')
        class CategoryForm(forms.ModelForm):
            description = forms.CharField()
            class Meta:
                model = Category
                fields = ['description', 'url']
        self.assertEqual(list(CategoryForm.base_fields),
                         ['description', 'url'])
        self.assertHTMLEqual(six.text_type(CategoryForm()), '''<tr><th><label for="id_description">Description:</label></th><td><input type="text" name="description" id="id_description" /></td></tr>
<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>''')
        # to_field_name should also work on ModelMultipleChoiceField ##################
        field = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
        self.assertEqual(tuple(field.choices), ((86, 'Apple'), (87, 'Core'), (22, 'Pear')))
        self.assertQuerysetEqual(field.clean([86]), ['Apple'])
        form = SelectInventoryForm({'items': [87, 22]})
        self.assertTrue(form.is_valid())
        self.assertEqual(len(form.cleaned_data), 1)
        self.assertQuerysetEqual(form.cleaned_data['items'], ['Core', 'Pear'])
    def test_model_field_that_returns_none_to_exclude_itself_with_explicit_fields(self):
        # A model field whose formfield() returns None is dropped even when
        # named explicitly in Meta.fields.
        self.assertEqual(list(CustomFieldForExclusionForm.base_fields),
                         ['name'])
        self.assertHTMLEqual(six.text_type(CustomFieldForExclusionForm()),
                             '''<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="10" /></td></tr>''')
    def test_iterable_model_m2m(self):
        class ColourfulItemForm(forms.ModelForm):
            class Meta:
                model = ColourfulItem
                fields = '__all__'
        colour = Colour.objects.create(name='Blue')
        form = ColourfulItemForm()
        self.maxDiff = 1024
        self.assertHTMLEqual(
            form.as_p(),
            """<p><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="50" /></p>
        <p><label for="id_colours">Colours:</label> <select multiple="multiple" name="colours" id="id_colours">
        <option value="%(blue_pk)s">Blue</option>
        </select></p>"""
            % {'blue_pk': colour.pk})
    def test_callable_field_default(self):
        # Callable defaults must be rendered as initial values, and a bound
        # form with exactly those values must report no changes.
        class PublicationDefaultsForm(forms.ModelForm):
            class Meta:
                model = PublicationDefaults
                fields = '__all__'
        self.maxDiff = 2000
        form = PublicationDefaultsForm()
        today_str = str(datetime.date.today())
        self.assertHTMLEqual(
            form.as_p(),
            """<p><label for="id_title">Title:</label> <input id="id_title" maxlength="30" name="title" type="text" /></p>
        <p><label for="id_date_published">Date published:</label>
        <input id="id_date_published" name="date_published" type="text" value="{0}" />
        <input id="initial-id_date_published" name="initial-date_published" type="hidden" value="{0}" /></p>
        <p><label for="id_mode">Mode:</label> <select id="id_mode" name="mode">
        <option value="di" selected="selected">direct</option>
        <option value="de">delayed</option></select>
        <input id="initial-id_mode" name="initial-mode" type="hidden" value="di" /></p>
        <p><label for="id_category">Category:</label> <select id="id_category" name="category">
        <option value="1">Games</option>
        <option value="2">Comics</option>
        <option value="3" selected="selected">Novel</option></select>
        <input id="initial-id_category" name="initial-category" type="hidden" value="3" />
        """.format(today_str)
        )
        empty_data = {
            'title': '',
            'date_published': today_str,
            'initial-date_published': today_str,
            'mode': 'di',
            'initial-mode': 'di',
            'category': '3',
            'initial-category': '3',
        }
        bound_form = PublicationDefaultsForm(empty_data)
        self.assertFalse(bound_form.has_changed())
class ModelFormCustomErrorTests(SimpleTestCase):
    """Custom error messages defined on the form, the model, and Model.clean()."""
    def test_custom_error_messages(self):
        # name1's message comes from the form declaration, name2's from the
        # model field's error_messages.
        data = {'name1': '@#$!!**@#$', 'name2': '@#$!!**@#$'}
        errors = CustomErrorMessageForm(data).errors
        self.assertHTMLEqual(
            str(errors['name1']),
            '<ul class="errorlist"><li>Form custom error message.</li></ul>'
        )
        self.assertHTMLEqual(
            str(errors['name2']),
            '<ul class="errorlist"><li>Model custom error message.</li></ul>'
        )
    def test_model_clean_error_messages(self):
        # Errors raised from Model.clean() are attached to the right field,
        # whether raised via a dict or the simpler field-name syntax.
        data = {'name1': 'FORBIDDEN_VALUE', 'name2': 'ABC'}
        form = CustomErrorMessageForm(data)
        self.assertFalse(form.is_valid())
        self.assertHTMLEqual(
            str(form.errors['name1']),
            '<ul class="errorlist"><li>Model.clean() error messages.</li></ul>'
        )
        data = {'name1': 'FORBIDDEN_VALUE2', 'name2': 'ABC'}
        form = CustomErrorMessageForm(data)
        self.assertFalse(form.is_valid())
        self.assertHTMLEqual(
            str(form.errors['name1']),
            '<ul class="errorlist"><li>Model.clean() error messages (simpler syntax).</li></ul>'
        )
        # Non-field errors land under __all__.
        data = {'name1': 'GLOBAL_ERROR', 'name2': 'ABC'}
        form = CustomErrorMessageForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['__all__'], ['Global error message.'])
class CustomCleanTests(TestCase):
    """Behavior of overridden ModelForm.clean() methods."""
    def test_override_clean(self):
        """
        Regression for #12596: Calling super from ModelForm.clean() should be
        optional.
        """
        class TripleFormWithCleanOverride(forms.ModelForm):
            class Meta:
                model = Triple
                fields = '__all__'
            def clean(self):
                # Deliberately does NOT call super().clean().
                if not self.cleaned_data['left'] == self.cleaned_data['right']:
                    raise forms.ValidationError('Left and right should be equal')
                return self.cleaned_data
        form = TripleFormWithCleanOverride({'left': 1, 'middle': 2, 'right': 1})
        self.assertTrue(form.is_valid())
        # form.instance.left will be None if the instance was not constructed
        # by form.full_clean().
        self.assertEqual(form.instance.left, 1)
    def test_model_form_clean_applies_to_model(self):
        """
        Regression test for #12960. Make sure the cleaned_data returned from
        ModelForm.clean() is applied to the model instance.
        """
        class CategoryForm(forms.ModelForm):
            class Meta:
                model = Category
                fields = '__all__'
            def clean(self):
                # Mutation made in clean() must be reflected on the saved model.
                self.cleaned_data['name'] = self.cleaned_data['name'].upper()
                return self.cleaned_data
        data = {'name': 'Test', 'slug': 'test', 'url': '/test'}
        form = CategoryForm(data)
        category = form.save()
        self.assertEqual(category.name, 'TEST')
class ModelFormInheritanceTests(SimpleTestCase):
    """Field collection when ModelForms mix with plain Forms and mixins."""
    def test_form_subclass_inheritance(self):
        # Fields declared on a plain Form base are merged after model fields.
        class Form(forms.Form):
            age = forms.IntegerField()
        class ModelForm(forms.ModelForm, Form):
            class Meta:
                model = Writer
                fields = '__all__'
        self.assertEqual(list(ModelForm().fields.keys()), ['name', 'age'])
    def test_field_removal(self):
        # Setting an attribute to None in a subclass/mixin removes the field;
        # the outcome depends on the MRO of the dynamically built class.
        class ModelForm(forms.ModelForm):
            class Meta:
                model = Writer
                fields = '__all__'
        class Mixin(object):
            age = None
        class Form(forms.Form):
            age = forms.IntegerField()
        class Form2(forms.Form):
            foo = forms.IntegerField()
        self.assertEqual(list(ModelForm().fields.keys()), ['name'])
        self.assertEqual(list(type(str('NewForm'), (Mixin, Form), {})().fields.keys()), [])
        self.assertEqual(list(type(str('NewForm'), (Form2, Mixin, Form), {})().fields.keys()), ['foo'])
        self.assertEqual(list(type(str('NewForm'), (Mixin, ModelForm, Form), {})().fields.keys()), ['name'])
        self.assertEqual(list(type(str('NewForm'), (ModelForm, Mixin, Form), {})().fields.keys()), ['name'])
        self.assertEqual(list(type(str('NewForm'), (ModelForm, Form, Mixin), {})().fields.keys()), ['name', 'age'])
        self.assertEqual(list(type(str('NewForm'), (ModelForm, Form), {'age': None})().fields.keys()), ['name'])
    def test_field_removal_name_clashes(self):
        """Regression test for https://code.djangoproject.com/ticket/22510."""
        # Removing a field whose name clashes with a class attribute (media)
        # must not delete the attribute itself.
        class MyForm(forms.ModelForm):
            media = forms.CharField()
            class Meta:
                model = Writer
                fields = '__all__'
        class SubForm(MyForm):
            media = None
        self.assertIn('media', MyForm().fields)
        self.assertNotIn('media', SubForm().fields)
        self.assertTrue(hasattr(MyForm, 'media'))
        self.assertTrue(hasattr(SubForm, 'media'))
class StumpJokeForm(forms.ModelForm):
    # ModelForm over all StumpJoke fields; used by LimitChoicesToTest to
    # exercise callable limit_choices_to on FK and M2M relations.
    class Meta:
        model = StumpJoke
        fields = '__all__'
class CustomFieldWithQuerysetButNoLimitChoicesTo(forms.Field):
    # A form field carrying a `queryset` attribute but no `limit_choices_to`
    # (regression fixture for #23795); the value is an arbitrary sentinel.
    queryset = 42
class StumpJokeWithCustomFieldForm(forms.ModelForm):
    # Declares only the custom field above; no model fields are wanted.
    custom = CustomFieldWithQuerysetButNoLimitChoicesTo()
    class Meta:
        model = StumpJoke
        fields = () # We don't need any fields from the model
class LimitChoicesToTest(TestCase):
    """
    Tests the functionality of ``limit_choices_to``.
    """
    def setUp(self):
        # threepwood's last_action is in the future, marley's in the past;
        # the callable limit_choices_to filters on that distinction.
        self.threepwood = Character.objects.create(
            username='threepwood',
            last_action=datetime.datetime.today() + datetime.timedelta(days=1),
        )
        self.marley = Character.objects.create(
            username='marley',
            last_action=datetime.datetime.today() - datetime.timedelta(days=1),
        )
    def test_limit_choices_to_callable_for_fk_rel(self):
        """
        A ForeignKey relation can use ``limit_choices_to`` as a callable, re #2554.
        """
        stumpjokeform = StumpJokeForm()
        self.assertIn(self.threepwood, stumpjokeform.fields['most_recently_fooled'].queryset)
        self.assertNotIn(self.marley, stumpjokeform.fields['most_recently_fooled'].queryset)
    def test_limit_choices_to_callable_for_m2m_rel(self):
        """
        A ManyToMany relation can use ``limit_choices_to`` as a callable, re #2554.
        """
        stumpjokeform = StumpJokeForm()
        self.assertIn(self.threepwood, stumpjokeform.fields['has_fooled_today'].queryset)
        self.assertNotIn(self.marley, stumpjokeform.fields['has_fooled_today'].queryset)
    def test_custom_field_with_queryset_but_no_limit_choices_to(self):
        """
        Regression test for #23795: Make sure a custom field with a `queryset`
        attribute but no `limit_choices_to` still works.
        """
        f = StumpJokeWithCustomFieldForm()
        self.assertEqual(f.fields['custom'].queryset, 42)
class FormFieldCallbackTests(SimpleTestCase):
    """modelform_factory options: widgets, fields/exclude, formfield_callback."""
    def test_baseform_with_widgets_in_meta(self):
        """Regression for #13095: Using base forms with widgets defined in Meta should not raise errors."""
        widget = forms.Textarea()
        class BaseForm(forms.ModelForm):
            class Meta:
                model = Person
                widgets = {'name': widget}
                fields = "__all__"
        Form = modelform_factory(Person, form=BaseForm)
        self.assertIs(Form.base_fields['name'].widget, widget)
    def test_factory_with_widget_argument(self):
        """ Regression for #15315: modelform_factory should accept widgets
            argument
        """
        widget = forms.Textarea()
        # Without a widget should not set the widget to textarea
        Form = modelform_factory(Person, fields="__all__")
        self.assertNotEqual(Form.base_fields['name'].widget.__class__, forms.Textarea)
        # With a widgets argument, the factory SHOULD set the widget to
        # Textarea. (Original comment was a copy-paste error and contradicted
        # the assertion below.)
        Form = modelform_factory(Person, fields="__all__", widgets={'name': widget})
        self.assertEqual(Form.base_fields['name'].widget.__class__, forms.Textarea)
    def test_modelform_factory_without_fields(self):
        """ Regression for #19733 """
        message = (
            "Calling modelform_factory without defining 'fields' or 'exclude' "
            "explicitly is prohibited."
        )
        with self.assertRaisesMessage(ImproperlyConfigured, message):
            modelform_factory(Person)
    def test_modelform_factory_with_all_fields(self):
        """ Regression for #19733 """
        form = modelform_factory(Person, fields="__all__")
        self.assertEqual(list(form.base_fields), ["name"])
    def test_custom_callback(self):
        """Test that a custom formfield_callback is used if provided"""
        callback_args = []
        def callback(db_field, **kwargs):
            # Record each (field, kwargs) pair the factory passes through.
            callback_args.append((db_field, kwargs))
            return db_field.formfield(**kwargs)
        widget = forms.Textarea()
        class BaseForm(forms.ModelForm):
            class Meta:
                model = Person
                widgets = {'name': widget}
                fields = "__all__"
        modelform_factory(Person, form=BaseForm, formfield_callback=callback)
        id_field, name_field = Person._meta.fields
        # Meta.widgets entries are forwarded to the callback as kwargs.
        self.assertEqual(callback_args,
                         [(id_field, {}), (name_field, {'widget': widget})])
    def test_bad_callback(self):
        # A bad callback provided by user still gives an error
        self.assertRaises(TypeError, modelform_factory, Person, fields="__all__",
                          formfield_callback='not a function or callable')
class LocalizedModelFormTest(TestCase):
    """Meta.localized_fields: a tuple of names, '__all__', or invalid input."""
    def test_model_form_applies_localize_to_some_fields(self):
        class PartiallyLocalizedTripleForm(forms.ModelForm):
            class Meta:
                model = Triple
                localized_fields = ('left', 'right',)
                fields = '__all__'
        f = PartiallyLocalizedTripleForm({'left': 10, 'middle': 10, 'right': 10})
        self.assertTrue(f.is_valid())
        # Only the listed fields get localize=True.
        self.assertTrue(f.fields['left'].localize)
        self.assertFalse(f.fields['middle'].localize)
        self.assertTrue(f.fields['right'].localize)
    def test_model_form_applies_localize_to_all_fields(self):
        class FullyLocalizedTripleForm(forms.ModelForm):
            class Meta:
                model = Triple
                localized_fields = '__all__'
                fields = '__all__'
        f = FullyLocalizedTripleForm({'left': 10, 'middle': 10, 'right': 10})
        self.assertTrue(f.is_valid())
        self.assertTrue(f.fields['left'].localize)
        self.assertTrue(f.fields['middle'].localize)
        self.assertTrue(f.fields['right'].localize)
    def test_model_form_refuses_arbitrary_string(self):
        # Any string other than '__all__' is rejected at class creation time.
        with self.assertRaises(TypeError):
            class BrokenLocalizedTripleForm(forms.ModelForm):
                class Meta:
                    model = Triple
                    localized_fields = "foo"
class CustomMetaclass(ModelFormMetaclass):
    """ModelForm metaclass that discards every collected field."""
    def __new__(cls, name, bases, attrs):
        new_class = super(CustomMetaclass, cls).__new__(cls, name, bases, attrs)
        # Wipe whatever fields the parent metaclass gathered.
        new_class.base_fields = {}
        return new_class
class CustomMetaclassForm(six.with_metaclass(CustomMetaclass, forms.ModelForm)):
    # A ModelForm built with CustomMetaclass, so its base_fields is emptied.
    pass
class CustomMetaclassTestCase(SimpleTestCase):
    def test_modelform_factory_metaclass(self):
        # The factory must honour the metaclass of the supplied base form,
        # so the produced class also ends up with empty base_fields.
        new_cls = modelform_factory(Person, fields="__all__", form=CustomMetaclassForm)
        self.assertEqual(new_cls.base_fields, {})
| bsd-3-clause |
paulocoding/DataScienceMachineLearning | DataMunging/DataProcessing.py | 2 | 1705 | import pandas as pd
def sum_of_digits(str_value):
    """
    Sum up all the digits in a number string until a single digit (the
    digital root) remains. Returns 0 for an empty string.

    Eg:
        1 => 1
        11 => 2
        123 => 6
        1235 => 2
        98 => 8
    """
    # Use the sum() builtin instead of a manual accumulator, and iterate
    # instead of recursing (no recursion-depth concerns, same result).
    total = sum(int(digit) for digit in str_value)
    while total > 9:
        total = sum(int(digit) for digit in str(total))
    return total
def do_processing(file_name):
    """Read an XLSX file, derive name/DOB columns and print the DataFrame.

    Adds columns: first_name, last_name, dob (DDMMYYYY string) and sum_dob
    (digital root of the dob digits).
    """
    # Read an excel file. Make sure it is in XLSX format and
    # you have XlsxWriter package installed.
    # Pandas uses this package to read excel files.
    df = pd.read_excel(file_name)
    # Split the name into first name and last name
    # You will get a pandas Series which has 1 column with a list in each row
    fn_ln_list = df['Name'].str.split(' ')
    # Use list comprehension to build a list for the first name and last name
    # NOTE(review): assumes every Name has exactly two space-separated parts.
    df['first_name'] = [name[0] for name in fn_ln_list]
    df['last_name'] = [name[1] for name in fn_ln_list]
    # Pandas DataFrame automatically recognizes the date field and converts
    # it into a datetime object. Using strftime to convert the datetime object
    # to a string in the format DDMMYYYY
    df['dob'] = df['Date Of Birth'].apply(lambda x: x.strftime('%d%m%Y'))
    # Sum the numbers in DOB to a single digit
    # Create a new field to save the sum of digits
    df['sum_dob'] = df['dob'].apply(sum_of_digits)
    # Dump the resulting frame and its schema for inspection.
    print "\n\n\nDataFrame:\n"
    print "----------\n", df
    print "\n\n\nDataFrame Columns:\n"
    print "------------------\n", df.columns
    print "\n\n\nDataFrame Data Types:\n"
    print "---------------------\n", df.dtypes
if __name__ == '__main__':
    # Entry point: process the sample spreadsheet when run as a script.
    file_name = 'person_details.xlsx'
    do_processing(file_name)
betoesquivel/fil2014 | filenv/lib/python2.7/site-packages/django/contrib/staticfiles/finders.py | 90 | 9285 | import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import default_storage, Storage, FileSystemStorage
from django.utils.datastructures import SortedDict
from django.utils.functional import empty, memoize, LazyObject
from django.utils.module_loading import import_by_path
from django.utils._os import safe_join
from django.utils import six
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.storage import AppStaticStorage
_finders = SortedDict()
class BaseFinder(object):
    """
    A base file finder to be used for custom staticfiles finder classes.

    Subclasses must implement both ``find()`` and ``list()``.
    """
    def find(self, path, all=False):
        """
        Given a relative file path this ought to find an
        absolute file path.

        If the ``all`` parameter is ``False`` (default) only
        the first found file path will be returned; if set
        to ``True`` a list of all found files paths is returned.
        """
        # Include a hint in the error so incomplete subclasses fail loudly
        # with an actionable message instead of a bare NotImplementedError.
        raise NotImplementedError(
            'subclasses of BaseFinder must provide a find() method')

    def list(self, ignore_patterns):
        """
        Given an optional list of paths to ignore, this should return
        a two item iterable consisting of the relative path and storage
        instance.
        """
        raise NotImplementedError(
            'subclasses of BaseFinder must provide a list() method')
class FileSystemFinder(BaseFinder):
    """
    A static files finder that uses the ``STATICFILES_DIRS`` setting
    to locate files.
    """
    def __init__(self, apps=None, *args, **kwargs):
        # List of (prefix, root) locations with static files
        self.locations = []
        # Maps dir paths to an appropriate storage instance
        self.storages = SortedDict()
        if not isinstance(settings.STATICFILES_DIRS, (list, tuple)):
            raise ImproperlyConfigured(
                "Your STATICFILES_DIRS setting is not a tuple or list; "
                "perhaps you forgot a trailing comma?")
        for root in settings.STATICFILES_DIRS:
            # Each entry may be a plain path or a (url_prefix, path) pair.
            if isinstance(root, (list, tuple)):
                prefix, root = root
            else:
                prefix = ''
            # Serving straight out of STATIC_ROOT would make collectstatic
            # copy files onto themselves, so it is explicitly forbidden.
            if settings.STATIC_ROOT and os.path.abspath(settings.STATIC_ROOT) == os.path.abspath(root):
                raise ImproperlyConfigured(
                    "The STATICFILES_DIRS setting should "
                    "not contain the STATIC_ROOT setting")
            if (prefix, root) not in self.locations:
                self.locations.append((prefix, root))
        # One FileSystemStorage per root, remembering its URL prefix.
        for prefix, root in self.locations:
            filesystem_storage = FileSystemStorage(location=root)
            filesystem_storage.prefix = prefix
            self.storages[root] = filesystem_storage
        super(FileSystemFinder, self).__init__(*args, **kwargs)
    def find(self, path, all=False):
        """
        Looks for files in the extra locations
        as defined in ``STATICFILES_DIRS``.
        """
        matches = []
        for prefix, root in self.locations:
            matched_path = self.find_location(root, path, prefix)
            if matched_path:
                if not all:
                    return matched_path
                matches.append(matched_path)
        return matches
    def find_location(self, root, path, prefix=None):
        """
        Finds a requested static file in a location, returning the found
        absolute path (or ``None`` if no match).
        """
        if prefix:
            # Only paths under the prefix can match; strip it before joining.
            prefix = '%s%s' % (prefix, os.sep)
            if not path.startswith(prefix):
                return None
            path = path[len(prefix):]
        # safe_join prevents directory traversal outside of root.
        path = safe_join(root, path)
        if os.path.exists(path):
            return path
    def list(self, ignore_patterns):
        """
        List all files in all locations.
        """
        for prefix, root in self.locations:
            storage = self.storages[root]
            for path in utils.get_files(storage, ignore_patterns):
                yield path, storage
class AppDirectoriesFinder(BaseFinder):
    """
    A static files finder that looks in the directory of each app as
    specified in the source_dir attribute of the given storage class.
    """
    storage_class = AppStaticStorage
    def __init__(self, apps=None, *args, **kwargs):
        # The list of apps that are handled
        self.apps = []
        # Mapping of app module paths to storage instances
        self.storages = SortedDict()
        if apps is None:
            apps = settings.INSTALLED_APPS
        for app in apps:
            app_storage = self.storage_class(app)
            # Only keep apps that actually have a static directory on disk.
            if os.path.isdir(app_storage.location):
                self.storages[app] = app_storage
                if app not in self.apps:
                    self.apps.append(app)
        super(AppDirectoriesFinder, self).__init__(*args, **kwargs)
    def list(self, ignore_patterns):
        """
        List all files in all app storages.
        """
        for storage in six.itervalues(self.storages):
            if storage.exists(''): # check if storage location exists
                for path in utils.get_files(storage, ignore_patterns):
                    yield path, storage
    def find(self, path, all=False):
        """
        Looks for files in the app directories.
        """
        matches = []
        for app in self.apps:
            match = self.find_in_app(app, path)
            if match:
                if not all:
                    return match
                matches.append(match)
        return matches
    def find_in_app(self, app, path):
        """
        Find a requested static file in an app's static locations.

        Returns the absolute path, or ``None`` if the app doesn't serve it.
        """
        storage = self.storages.get(app, None)
        if storage:
            if storage.prefix:
                # The request must carry the app's prefix; strip it off.
                prefix = '%s%s' % (storage.prefix, os.sep)
                if not path.startswith(prefix):
                    return None
                path = path[len(prefix):]
            # only try to find a file if the source dir actually exists
            if storage.exists(path):
                matched_path = storage.path(path)
                if matched_path:
                    return matched_path
class BaseStorageFinder(BaseFinder):
    """
    A base static files finder meant to be extended with a custom storage
    class, either via the ``storage`` class attribute or constructor arg.
    """
    storage = None
    def __init__(self, storage=None, *args, **kwargs):
        if storage is not None:
            self.storage = storage
        if self.storage is None:
            raise ImproperlyConfigured("The staticfiles storage finder %r "
                                       "doesn't have a storage class "
                                       "assigned." % self.__class__)
        # Make sure we have an storage instance here.
        # (A class — not an instance — may have been assigned; instantiate it.)
        if not isinstance(self.storage, (Storage, LazyObject)):
            self.storage = self.storage()
        super(BaseStorageFinder, self).__init__(*args, **kwargs)
    def find(self, path, all=False):
        """
        Looks for files in the default file storage, if it's local.
        """
        try:
            # Non-local storages raise NotImplementedError from path();
            # in that case there is nothing findable on the filesystem.
            self.storage.path('')
        except NotImplementedError:
            pass
        else:
            if self.storage.exists(path):
                match = self.storage.path(path)
                if all:
                    match = [match]
                return match
        return []
    def list(self, ignore_patterns):
        """
        List all files of the storage.
        """
        for path in utils.get_files(self.storage, ignore_patterns):
            yield path, self.storage
class DefaultStorageFinder(BaseStorageFinder):
    """
    A static files finder that uses the default storage backend.
    """
    storage = default_storage
    def __init__(self, *args, **kwargs):
        super(DefaultStorageFinder, self).__init__(*args, **kwargs)
        # ``empty`` is the lazy-object sentinel: if the backend exposes no
        # truthy base_location, this finder cannot serve files — fail early.
        base_location = getattr(self.storage, 'base_location', empty)
        if not base_location:
            raise ImproperlyConfigured("The storage backend of the "
                                       "staticfiles finder %r doesn't have "
                                       "a valid location." % self.__class__)
def find(path, all=False):
    """
    Find a static file with the given path using all enabled finders.

    If ``all`` is ``False`` (default), return the first matching
    absolute path (or ``None`` if no match). Otherwise return a list.
    """
    found = []
    for finder in get_finders():
        result = finder.find(path, all=all)
        # Single-match mode: short-circuit on the first hit.
        if not all and result:
            return result
        # Normalize scalar results so they can be accumulated uniformly.
        if not isinstance(result, (list, tuple)):
            result = [result]
        found.extend(result)
    if found:
        return found
    # Nothing matched in any finder.
    return [] if all else None
def get_finders():
    """Yield an instance of every finder named in STATICFILES_FINDERS."""
    for import_path in settings.STATICFILES_FINDERS:
        yield get_finder(import_path)
def _get_finder(import_path):
    """
    Imports the staticfiles finder class described by import_path, where
    import_path is the full Python path to the class, and returns an instance.
    """
    finder_class = import_by_path(import_path)
    # Reject classes that do not implement the finder contract.
    if not issubclass(finder_class, BaseFinder):
        raise ImproperlyConfigured('Finder "%s" is not a subclass of "%s"' %
                                   (finder_class, BaseFinder))
    return finder_class()
# Cache finder instances per import path (bounded memoization).
get_finder = memoize(_get_finder, _finders, 1)
| mit |
krbaker/Diamond | src/collectors/http/http.py | 7 | 3323 | # coding=utf-8
"""
Collect statistics from an HTTP or HTTPS connection
#### Dependencies
* urllib2
#### Usage
Add the collector config as :
enabled = True
ttl_multiplier = 2
path_suffix = ""
measure_collector_time = False
byte_unit = byte,
req_vhost = www.my_server.com
req_url = https://www.my_server.com/, https://www.my_server.com/assets/jquery.js
Metrics are collected as :
- servers.<hostname>.http.<url>.size (size of the page received in bytes)
- servers.<hostname>.http.<url>.time (time to download the page in microsec)
'.' and '/' chars are replaced by __, url looking like
http://www.site.com/admin/page.html are replaced by
http:__www_site_com_admin_page_html
"""
import urllib2
import diamond.collector
import datetime
class HttpCollector(diamond.collector.Collector):
    """Collects download time and page size for a configurable list of URLs.

    For each URL in ``req_url`` two gauges are published:
    ``<name>.time`` (microseconds) and ``<name>.size`` (bytes), where the
    name is the URL with '.', '/', '\\' and ':' characters sanitized so it
    forms a valid metric path component.
    """

    def get_default_config_help(self):
        """Describe the collector-specific configuration keys."""
        config_help = super(HttpCollector, self).get_default_config_help()
        config_help.update({
            'req_port': 'Port',
            'req_url':
            'array of full URL to get (ex : https://www.ici.net/mypage.html)',
            'req_vhost':
            'Host header variable if needed. Will be added to every request',
        })
        return config_help

    def get_default_config(self):
        """Return the default configuration (localhost, no vhost)."""
        default_config = super(HttpCollector, self).get_default_config()
        default_config['path'] = 'http'
        default_config['req_vhost'] = ''
        default_config['req_url'] = ['http://localhost/']
        default_config['headers'] = {'User-Agent': 'Diamond HTTP collector', }
        return default_config

    def collect(self):
        """Fetch every configured URL and publish its size and fetch time."""
        # Set the Host header once; it applies to every request below.
        if self.config['req_vhost'] != "":
            self.config['headers']['Host'] = self.config['req_vhost']

        # time the request
        for url in self.config['req_url']:
            self.log.debug("collecting %s", str(url))
            req_start = datetime.datetime.now()
            req = urllib2.Request(url, headers=self.config['headers'])
            try:
                handle = urllib2.urlopen(req)
                the_page = handle.read()
                req_time = datetime.datetime.now() - req_start

                # build a compatible name : no '.', '/', '\' or ':' in it
                metric_name = url.replace(
                    '/', '_').replace(
                    '.', '_').replace(
                    '\\', '').replace(
                    ':', '')
                if metric_name == '':
                    metric_name = "root"
                self.publish_gauge(
                    metric_name + '.time',
                    req_time.seconds * 1000000 + req_time.microseconds)
                self.publish_gauge(
                    metric_name + '.size',
                    len(the_page))
            except IOError as e:
                # Bug fix: report the URL that actually failed (plus the
                # reason), not the whole configured req_url list.
                self.log.error("Unable to open %s: %s", url, e)
            except Exception as e:
                self.log.error("Unknown error opening url: %s", e)
| mit |
crimsonthunder/kernel_samsung_trlte_5.1.1 | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
	"""Return the interval from src to dst, converted from nsec to msec."""
	delta_nsec = dst - src
	return delta_nsec / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
	"""Print one tx record: device, length, queue time and the two latencies
	(dev_queue_xmit -> hard_start_xmit, hard_start_xmit -> skb free)."""
	# Skip entries for other devices when a "dev=" filter is active.
	if dev != 0 and hunk['dev'].find(dev) < 0:
		return
	print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
		(hunk['dev'], hunk['len'],
		nsecs_secs(hunk['queue_t']),
		nsecs_nsecs(hunk['queue_t'])/1000,
		diff_msec(hunk['queue_t'], hunk['xmit_t']),
		diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrputs associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
    """perf entry hook: parse the script's arguments before event processing.

    Recognized arguments: 'tx' (show transmit hunks), 'rx' (show receive
    hunks), 'dev=<name>' (filter output by device name) and 'debug' (dump
    buffer statistics at the end).  If neither 'tx' nor 'rx' is given,
    both directions are enabled.
    """
    global show_tx
    global show_rx
    global dev
    global debug
    for i in range(len(sys.argv)):
        if i == 0:
            # sys.argv[0] is the script name, not an option
            continue
        arg = sys.argv[i]
        if arg == 'tx':
            show_tx = 1
        elif arg =='rx':
            show_rx = 1
        elif arg.find('dev=',0, 4) >= 0:
            # everything after the "dev=" prefix is the device-name filter
            dev = arg[4:]
        elif arg == 'debug':
            debug = 1
    if show_tx == 0 and show_rx == 0:
        # default: display both directions
        show_tx = 1
        show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a correspoinding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
    """Record the entry of a hardware interrupt handler.

    Pushes a new irq record (irq number, handler name, cpu, entry time in
    nsec) onto the per-cpu stack kept in the module-level irq_dic.
    """
    (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
    irq_record = {'irq': irq, 'name': irq_name, 'cpu': cpu, 'irq_ent_t': time}
    # setdefault replaces the original O(n) `cpu not in irq_dic.keys()`
    # membership test (Python 2 keys() materializes a list) with a single
    # dict lookup; behavior is otherwise unchanged.
    irq_dic.setdefault(cpu, []).append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
    """Attach a NET_RX softirq-raise event to the in-flight irq record.

    If no hardirq is currently stacked for this cpu the event is dropped;
    otherwise the raise is appended to the topmost irq record's event list.
    """
    (name, context, cpu, time, pid, comm, vec) = event_info
    pending = irq_dic.get(cpu)
    if not pending:
        return
    # Mutate the topmost record in place -- equivalent to the original
    # pop / update / re-append sequence.
    current_irq = pending[-1]
    events = current_irq.setdefault('event_list', [])
    events.append({'time': time, 'event': 'sirq_raise'})
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
    """On NET_RX softirq exit, merge irq and softirq data into one hunk.

    Pairs the per-cpu hardirq records (irq_dic) with the NET_RX softirq
    record (net_rx_dic) collected since the softirq entry, and appends the
    merged "receive hunk" to receive_hunk_list.  If either side is missing
    the partial data is silently discarded.
    """
    (name, context, cpu, time, pid, comm, vec) = event_info
    irq_list = []
    event_list = 0
    if cpu in irq_dic.keys():
        irq_list = irq_dic[cpu]
        del irq_dic[cpu]
    if cpu in net_rx_dic.keys():
        sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
        event_list = net_rx_dic[cpu]['event_list']
        del net_rx_dic[cpu]
    if irq_list == [] or event_list == 0:
        # incomplete pair: drop whatever was collected for this cpu
        return
    rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
                'irq_list':irq_list, 'event_list':event_list}
    # merge information related to a NET_RX softirq
    receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 |
TEAM-Gummy/platform_external_chromium_org | tools/telemetry/telemetry/core/timeline/process.py | 23 | 2296 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import telemetry.core.timeline.event_container as event_container
import telemetry.core.timeline.counter as tracing_counter
import telemetry.core.timeline.thread as tracing_thread
class Process(event_container.TimelineEventContainer):
  ''' The Process represents a single userland process in the trace.
  '''
  def __init__(self, parent, pid):
    super(Process, self).__init__('process %s' % pid, parent)
    self.pid = pid
    # Maps thread id -> Thread and counter full name -> Counter.
    self._threads = {}
    self._counters = {}

  @property
  def threads(self):
    return self._threads

  @property
  def counters(self):
    return self._counters

  def IterChildContainers(self):
    """Yields every thread and counter container owned by this process."""
    for thread in self._threads.itervalues():
      yield thread
    for counter in self._counters.itervalues():
      yield counter

  def IterAllSlicesOfName(self, name):
    """Yields all slices named `name` across all threads of this process."""
    for thread in self._threads.itervalues():
      for s in thread.IterAllSlicesOfName(name):
        yield s

  def IterEventsInThisContainer(self):
    # A process holds no events directly; the unreachable `yield` turns
    # this into an (empty) generator so callers can always iterate it.
    return
    yield # pylint: disable=W0101

  def GetOrCreateThread(self, tid):
    """Returns the Thread with id `tid`, creating and caching it if needed."""
    thread = self.threads.get(tid, None)
    if thread:
      return thread
    thread = tracing_thread.Thread(self, tid)
    self._threads[tid] = thread
    return thread

  def GetCounter(self, category, name):
    """Returns the counter `category.name`; raises ValueError if absent."""
    counter_id = category + '.' + name
    if counter_id in self.counters:
      return self.counters[counter_id]
    raise ValueError(
        'Counter %s not found in process with id %s.' % (counter_id,
                                                         self.pid))

  def GetOrCreateCounter(self, category, name):
    """Returns the counter `category.name`, creating it if it is absent."""
    try:
      return self.GetCounter(category, name)
    except ValueError:
      ctr = tracing_counter.Counter(self, category, name)
      self._counters[ctr.full_name] = ctr
      return ctr

  def AutoCloseOpenSlices(self, max_timestamp, max_thread_timestamp):
    # Close any slices that were still open when the trace ended.
    for thread in self._threads.itervalues():
      thread.AutoCloseOpenSlices(max_timestamp, max_thread_timestamp)

  def FinalizeImport(self):
    # Give every child container a chance to post-process imported data.
    for thread in self._threads.itervalues():
      thread.FinalizeImport()
    for counter in self._counters.itervalues():
      counter.FinalizeImport()
| bsd-3-clause |
rekbun/browserscope | gaeunit_test.py | 9 | 1328 | #!/usr/bin/python2.5
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test helpers. See test/test_gaeunit.py for actual tests."""
import logging
from google.appengine.ext import db
import django
from django import http
class TaskTrace(db.Model):
    """Singleton datastore record capturing details of the last task request."""
    # Fixed key so tests can fetch the single trace entity by name.
    KEY_NAME = 'task_trace'
    # HTTP method of the traced request ('GET' or 'POST').
    method = db.StringProperty()
    # Request character encoding as reported by django.
    encoding = db.StringProperty()
    # Value of the request parameter extracted by TaskHandler.
    param = db.StringProperty()
def TaskHandler(request):
    """Django view used by tests: records request details in a TaskTrace.

    Extracts 'the_get_param' from GET requests or 'the_post_param' from
    POST requests, persists method, encoding and the parameter value under
    the fixed TaskTrace key, and returns a simple confirmation response.
    """
    param = None
    if request.method == 'GET':
        param = request.GET.get('the_get_param')
    elif request.method == 'POST':
        param = request.POST.get('the_post_param')
    # key_name is fixed, so repeated requests overwrite the same entity.
    task_trace = TaskTrace(key_name=TaskTrace.KEY_NAME)
    task_trace.method = request.method
    task_trace.encoding = request.encoding
    task_trace.param = param
    task_trace.put()
    return http.HttpResponse('Saved Task Details')
| apache-2.0 |
lambday/shogun | examples/undocumented/python/kernel_weighted_degree_position_string.py | 10 | 1114 | #!/usr/bin/env python
from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_dna('../data/fm_train_dna.dat')
testdat = lm.load_dna('../data/fm_test_dna.dat')
parameter_list = [[traindat,testdat,20],[traindat,testdat,22]]
def kernel_weighted_degree_position_string (fm_train_dna=traindat,fm_test_dna=testdat,degree=20):
    """Compute train/test kernel matrices with a WD position string kernel.

    Builds DNA string features from the given sequence lists, constructs a
    WeightedDegreePositionStringKernel of the given degree with uniform
    shifts (10) and uniform position weights (1.0), and returns
    (km_train, km_test, kernel).
    """
    from shogun import StringCharFeatures, DNA
    from shogun import WeightedDegreePositionStringKernel, MSG_DEBUG
    feats_train=StringCharFeatures(fm_train_dna, DNA)
    #feats_train.io.set_loglevel(MSG_DEBUG)
    feats_test=StringCharFeatures(fm_test_dna, DNA)
    # The kernel is first evaluated train-vs-train...
    kernel=WeightedDegreePositionStringKernel(feats_train, feats_train, degree)
    from numpy import zeros,ones,float64,int32
    # One shift/weight per sequence position; assumes all sequences share
    # the length of the first training sequence -- TODO confirm with data.
    kernel.set_shifts(10*ones(len(fm_train_dna[0]), dtype=int32))
    kernel.set_position_weights(ones(len(fm_train_dna[0]), dtype=float64))
    km_train=kernel.get_kernel_matrix()
    # ...then re-initialized train-vs-test for the test matrix.
    kernel.init(feats_train, feats_test)
    km_test=kernel.get_kernel_matrix()
    return km_train,km_test,kernel
if __name__=='__main__':
print('WeightedDegreePositionString')
kernel_weighted_degree_position_string(*parameter_list[0])
| bsd-3-clause |
OptiPop/external_chromium_org | third_party/cython/src/Cython/Compiler/CythonScope.py | 99 | 5817 | from Symtab import ModuleScope
from PyrexTypes import *
from UtilityCode import CythonUtilityCode
from Errors import error
from Scanning import StringSourceDescriptor
import MemoryView
class CythonScope(ModuleScope):
is_cython_builtin = 1
_cythonscope_initialized = False
def __init__(self, context):
ModuleScope.__init__(self, u'cython', None, None)
self.pxd_file_loaded = True
self.populate_cython_scope()
# The Main.Context object
self.context = context
for fused_type in (cy_integral_type, cy_floating_type, cy_numeric_type):
entry = self.declare_typedef(fused_type.name,
fused_type,
None,
cname='<error>')
entry.in_cinclude = True
def lookup_type(self, name):
# This function should go away when types are all first-level objects.
type = parse_basic_type(name)
if type:
return type
return super(CythonScope, self).lookup_type(name)
def lookup(self, name):
entry = super(CythonScope, self).lookup(name)
if entry is None and not self._cythonscope_initialized:
self.load_cythonscope()
entry = super(CythonScope, self).lookup(name)
return entry
def find_module(self, module_name, pos):
error("cython.%s is not available" % module_name, pos)
def find_submodule(self, module_name):
entry = self.entries.get(module_name, None)
if not entry:
self.load_cythonscope()
entry = self.entries.get(module_name, None)
if entry and entry.as_module:
return entry.as_module
else:
# TODO: fix find_submodule control flow so that we're not
# expected to create a submodule here (to protect CythonScope's
# possible immutability). Hack ourselves out of the situation
# for now.
raise error((StringSourceDescriptor(u"cython", u""), 0, 0),
"cython.%s is not available" % module_name)
def lookup_qualified_name(self, qname):
# ExprNode.as_cython_attribute generates qnames and we untangle it here...
name_path = qname.split(u'.')
scope = self
while len(name_path) > 1:
scope = scope.lookup_here(name_path[0]).as_module
del name_path[0]
if scope is None:
return None
else:
return scope.lookup_here(name_path[0])
def populate_cython_scope(self):
# These are used to optimize isinstance in FinalOptimizePhase
type_object = self.declare_typedef(
'PyTypeObject',
base_type = c_void_type,
pos = None,
cname = 'PyTypeObject')
type_object.is_void = True
type_object_type = type_object.type
self.declare_cfunction(
'PyObject_TypeCheck',
CFuncType(c_bint_type, [CFuncTypeArg("o", py_object_type, None),
CFuncTypeArg("t", c_ptr_type(type_object_type), None)]),
pos = None,
defining = 1,
cname = 'PyObject_TypeCheck')
def load_cythonscope(self):
"""
Creates some entries for testing purposes and entries for
cython.array() and for cython.view.*.
"""
if self._cythonscope_initialized:
return
self._cythonscope_initialized = True
cython_testscope_utility_code.declare_in_scope(
self, cython_scope=self)
cython_test_extclass_utility_code.declare_in_scope(
self, cython_scope=self)
#
# The view sub-scope
#
self.viewscope = viewscope = ModuleScope(u'view', self, None)
self.declare_module('view', viewscope, None).as_module = viewscope
viewscope.is_cython_builtin = True
viewscope.pxd_file_loaded = True
cythonview_testscope_utility_code.declare_in_scope(
viewscope, cython_scope=self)
view_utility_scope = MemoryView.view_utility_code.declare_in_scope(
self.viewscope, cython_scope=self,
whitelist=MemoryView.view_utility_whitelist)
# self.entries["array"] = view_utility_scope.entries.pop("array")
def create_cython_scope(context):
    """Return a fresh CythonScope bound to the given Main.Context."""
    # One could in fact probably make it a singleton,
    # but not sure yet whether any code mutates it (which would kill reusing
    # it across different contexts)
    return CythonScope(context)
# Load test utilities for the cython scope
def load_testscope_utility(cy_util_name, **kwargs):
return CythonUtilityCode.load(cy_util_name, "TestCythonScope.pyx", **kwargs)
undecorated_methods_protos = UtilityCode(proto=u"""
/* These methods are undecorated and have therefore no prototype */
static PyObject *__pyx_TestClass_cdef_method(
struct __pyx_TestClass_obj *self, int value);
static PyObject *__pyx_TestClass_cpdef_method(
struct __pyx_TestClass_obj *self, int value, int skip_dispatch);
static PyObject *__pyx_TestClass_def_method(
PyObject *self, PyObject *value);
""")
cython_testscope_utility_code = load_testscope_utility("TestScope")
test_cython_utility_dep = load_testscope_utility("TestDep")
cython_test_extclass_utility_code = \
load_testscope_utility("TestClass", name="TestClass",
requires=[undecorated_methods_protos,
test_cython_utility_dep])
cythonview_testscope_utility_code = load_testscope_utility("View.TestScope")
| bsd-3-clause |
mikeconley/emscripten-scummvm | devtools/tasmrecover/tasm/parser.py | 56 | 7994 | # ScummVM - Graphic Adventure Engine
#
# ScummVM is the legal property of its developers, whose names
# are too numerous to list here. Please refer to the COPYRIGHT
# file distributed with this source distribution.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import os, re
from proc import proc
import lex
import op
class parser:
def __init__(self, skip_binary_data = []):
self.skip_binary_data = skip_binary_data
self.strip_path = 0
self.__globals = {}
self.__offsets = {}
self.__stack = []
self.proc = None
self.proc_list = []
self.binary_data = []
self.symbols = []
self.link_later = []
def visible(self):
for i in self.__stack:
if not i or i == 0:
return False
return True
def push_if(self, text):
value = self.eval(text)
#print "if %s -> %s" %(text, value)
self.__stack.append(value)
def push_else(self):
#print "else"
self.__stack[-1] = not self.__stack[-1]
def pop_if(self):
#print "endif"
return self.__stack.pop()
def set_global(self, name, value):
if len(name) == 0:
raise Exception("empty name is not allowed")
name = name.lower()
#print "adding global %s -> %s" %(name, value)
if self.__globals.has_key(name):
raise Exception("global %s was already defined", name)
self.__globals[name] = value
def get_global(self, name):
name = name.lower()
g = self.__globals[name]
g.used = True
return g
def get_globals(self):
return self.__globals
def has_global(self, name):
name = name.lower()
return self.__globals.has_key(name)
def set_offset(self, name, value):
if len(name) == 0:
raise Exception("empty name is not allowed")
name = name.lower()
#print "adding global %s -> %s" %(name, value)
if self.__offsets.has_key(name):
raise Exception("global %s was already defined", name)
self.__offsets[name] = value
def get_offset(self, name):
name = name.lower()
return self.__offsets[name]
def include(self, basedir, fname):
path = fname.split('\\')[self.strip_path:]
path = os.path.join(basedir, os.path.pathsep.join(path))
#print "including %s" %(path)
self.parse(path)
def eval(self, stmt):
try:
return self.parse_int(stmt)
except:
pass
value = self.__globals[stmt.lower()].value
return int(value)
def expr_callback(self, match):
name = match.group(1).lower()
g = self.get_global(name)
if isinstance(g, op.const):
return g.value
else:
return "0x%04x" %g.offset
def eval_expr(self, expr):
n = 1
while n > 0:
expr, n = re.subn(r'\b([a-zA-Z_]+[a-zA-Z0-9_]*)', self.expr_callback, expr)
return eval(expr)
def expand_globals(self, text):
return text
def fix_dollar(self, v):
print("$ = %d" %len(self.binary_data))
return re.sub(r'\$', "%d" %len(self.binary_data), v)
def parse_int(self, v):
if re.match(r'[01]+b$', v):
v = int(v[:-1], 2)
if re.match(r'[\+-]?[0-9a-f]+h$', v):
v = int(v[:-1], 16)
return int(v)
def compact_data(self, width, data):
#print "COMPACTING %d %s" %(width, data)
r = []
base = 0x100 if width == 1 else 0x10000
for v in data:
if v[0] == '"':
if v[-1] != '"':
raise Exception("invalid string %s" %v)
if width == 2:
raise Exception("string with data width more than 1") #we could allow it :)
for i in xrange(1, len(v) - 1):
r.append(ord(v[i]))
continue
m = re.match(r'(\w+)\s+dup\s+\((\s*\S+\s*)\)', v)
if m is not None:
#we should parse that
n = self.parse_int(m.group(1))
if m.group(2) != '?':
value = self.parse_int(m.group(2))
else:
value = 0
for i in xrange(0, n):
v = value
for b in xrange(0, width):
r.append(v & 0xff);
v >>= 8
continue
try:
v = self.parse_int(v)
if v < 0:
v += base
except:
#global name
print "global/expr: %s" %v
try:
g = self.get_global(v)
v = g.offset
except:
print "unknown address %s" %(v)
self.link_later.append((len(self.binary_data) + len(r), v))
v = 0
for b in xrange(0, width):
r.append(v & 0xff);
v >>= 8
#print r
return r
def parse(self, fname):
# print "opening file %s..." %(fname, basedir)
skipping_binary_data = False
fd = open(fname, 'rb')
for line in fd:
line = line.strip()
if len(line) == 0 or line[0] == ';' or line[0] == chr(0x1a):
continue
#print line
m = re.match('(\w+)\s*?:', line)
if m is not None:
line = line[len(m.group(0)):].strip()
if self.visible():
name = m.group(1)
if not (name.lower() in self.skip_binary_data):
if self.proc is not None:
self.proc.add_label(name)
print "offset %s -> %d" %(name, len(self.binary_data))
self.set_offset(name, (len(self.binary_data), self.proc, len(self.proc.stmts) if self.proc is not None else 0))
skipping_binary_data = False
else:
print "skipping binary data for %s" % (name,)
skipping_binary_data = True
#print line
cmd = line.split()
if len(cmd) == 0:
continue
cmd0 = str(cmd[0])
if cmd0 == 'if':
self.push_if(cmd[1])
continue
elif cmd0 == 'else':
self.push_else()
continue
elif cmd0 == 'endif':
self.pop_if()
continue
if not self.visible():
continue
if cmd0 == 'db' or cmd0 == 'dw' or cmd0 == 'dd':
arg = line[len(cmd0):].strip()
if not skipping_binary_data:
print "%d:1: %s" %(len(self.binary_data), arg) #fixme: COPYPASTE
binary_width = {'b': 1, 'w': 2, 'd': 4}[cmd0[1]]
self.binary_data += self.compact_data(binary_width, lex.parse_args(arg))
continue
elif cmd0 == 'include':
self.include(os.path.dirname(fname), cmd[1])
continue
elif cmd0 == 'endp':
self.proc = None
continue
elif cmd0 == 'assume':
print "skipping: %s" %line
continue
elif cmd0 == 'rep':
self.proc.add(cmd0)
self.proc.add(" ".join(cmd[1:]))
continue
if len(cmd) >= 3:
cmd1 = cmd[1]
if cmd1 == 'equ':
if not (cmd0.lower() in self.skip_binary_data):
v = cmd[2]
self.set_global(cmd0, op.const(self.fix_dollar(v)))
else:
print "skipping binary data for %s" % (cmd0.lower(),)
skipping_binary_data = True
elif cmd1 == 'db' or cmd1 == 'dw' or cmd1 == 'dd':
if not (cmd0.lower() in self.skip_binary_data):
binary_width = {'b': 1, 'w': 2, 'd': 4}[cmd1[1]]
offset = len(self.binary_data)
arg = line[len(cmd0):].strip()
arg = arg[len(cmd1):].strip()
print "%d: %s" %(offset, arg)
self.binary_data += self.compact_data(binary_width, lex.parse_args(arg))
self.set_global(cmd0.lower(), op.var(binary_width, offset))
skipping_binary_data = False
else:
print "skipping binary data for %s" % (cmd0.lower(),)
skipping_binary_data = True
continue
elif cmd1 == 'proc':
name = cmd0.lower()
self.proc = proc(name)
print "procedure %s, #%d" %(name, len(self.proc_list))
self.proc_list.append(name)
self.set_global(name, self.proc)
continue
if (self.proc):
self.proc.add(line)
else:
#print line
pass
fd.close()
return self
def link(self):
for addr, expr in self.link_later:
v = self.eval_expr(expr)
print "link: patching %04x -> %04x" %(addr, v)
while v != 0:
self.binary_data[addr] = v & 0xff
addr += 1
v >>= 8
| gpl-2.0 |
Antiun/purchase-workflow | purchase_requisition_bid_selection/wizard/purchase_requisition_partner.py | 27 | 1568 | # -*- coding: utf-8 -*-
#
#
# Copyright 2013, 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import models, api
class PurchaseRequisitionPartner(models.TransientModel):
    """Wizard extension: create a purchase order (or draft bid) for a partner."""
    _inherit = "purchase.requisition.partner"

    @api.multi
    def create_order(self):
        """Generate a purchase order for the selected partner.

        Creates the PO from the active purchase requisition.  When called
        with 'draft_bid' set in the context, opens the new RFQ form view
        instead of simply closing the wizard.
        """
        ActWindow = self.env['ir.actions.act_window']
        Requisition = self.env['purchase.requisition']
        # The requisition record this wizard was launched from.
        active_id = self.env.context and self.env.context.get('active_id', [])
        requisition = Requisition.browse(active_id)
        # make_purchase_order returns a mapping of requisition id -> po id.
        po_id = requisition.make_purchase_order(self.partner_id.id)[active_id]
        if not self.env.context.get('draft_bid', False):
            return {'type': 'ir.actions.act_window_close'}
        # Draft-bid mode: open the freshly created RFQ in form view.
        res = ActWindow.for_xml_id('purchase', 'purchase_rfq')
        res.update({'res_id': po_id,
                    'views': [(False, 'form')],
                    })
        return res
| agpl-3.0 |
haveal/googleads-python-lib | examples/dfa/authentication/generate_refresh_token.py | 4 | 2018 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates a refresh token for use with DFA."""
import sys
from oauth2client import client
# Your OAuth 2.0 Client ID and Secret. If you do not have an ID and Secret yet,
# please go to https://console.developers.google.com and create a set.
CLIENT_ID = 'INSERT_CLIENT_ID_HERE'
CLIENT_SECRET = 'INSERT_CLIENT_SECRET_HERE'
# The DFA API OAuth 2.0 scope.
SCOPE = u'https://www.googleapis.com/auth/dfatrafficking'
def main():
"""Retrieve and display the access and refresh token."""
flow = client.OAuth2WebServerFlow(
client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
scope=[SCOPE],
user_agent='Ads Python Client Library',
redirect_uri='urn:ietf:wg:oauth:2.0:oob')
authorize_url = flow.step1_get_authorize_url()
print ('Log into the Google Account you use to access your DFA account'
'and go to the following URL: \n%s\n' % (authorize_url))
print 'After approving the token enter the verification code (if specified).'
code = raw_input('Code: ').strip()
try:
credential = flow.step2_exchange(code)
except client.FlowExchangeError, e:
print 'Authentication has failed: %s' % e
sys.exit(1)
else:
print ('OAuth 2.0 authorization successful!\n\n'
'Your access token is:\n %s\n\nYour refresh token is:\n %s'
% (credential.access_token, credential.refresh_token))
if __name__ == '__main__':
main()
| apache-2.0 |
DefyVentures/edx-platform | lms/djangoapps/mobile_api/video_outlines/serializers.py | 16 | 8501 | """
Serializer for video outline
"""
from rest_framework.reverse import reverse
from xmodule.modulestore.mongo.base import BLOCK_TYPES_WITH_CHILDREN
from courseware.access import has_access
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from util.module_utils import get_dynamic_descriptor_children
from edxval.api import (
get_video_info_for_course_and_profiles, ValInternalError
)
class BlockOutline(object):
    """
    Serializes course videos, pulling data from VAL and the video modules.

    Iterating over an instance walks the course tree depth-first from
    ``start_block`` and yields one summary dict (path, URLs, summary data)
    per accessible block whose category appears in ``block_types``.
    """
    def __init__(self, course_id, start_block, block_types, request, video_profiles):
        """Create a BlockOutline using `start_block` as a starting point.

        Arguments:
            course_id: course key the outline belongs to.
            start_block: root block the traversal starts from.
            block_types: mapping of block category -> summary function,
                called as ``fn(course_id, block, request, local_cache)``.
            request: current request; needed for building absolute URLs.
            video_profiles: video profile names requested from VAL.
        """
        self.start_block = start_block
        self.block_types = block_types
        self.course_id = course_id
        self.request = request  # needed for making full URLS
        self.local_cache = {}
        # Prefetch VAL video data for the whole course once; on VAL-internal
        # errors fall back to an empty mapping so the outline still renders.
        try:
            self.local_cache['course_videos'] = get_video_info_for_course_and_profiles(
                unicode(course_id), video_profiles
            )
        except ValInternalError:  # pragma: nocover
            self.local_cache['course_videos'] = {}
    def __iter__(self):
        def parent_or_requested_block_type(usage_key):
            """
            Returns whether the usage_key's block_type is one of self.block_types or a parent type.
            """
            return (
                usage_key.block_type in self.block_types or
                usage_key.block_type in BLOCK_TYPES_WITH_CHILDREN
            )
        def create_module(descriptor):
            """
            Factory method for creating and binding a module for the given descriptor.
            """
            field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
                self.course_id, self.request.user, descriptor, depth=0,
            )
            return get_module_for_descriptor(
                self.request.user, self.request, descriptor, field_data_cache, self.course_id
            )
        # Iterative depth-first traversal; child_to_parent lets path()/find_urls()
        # reconstruct each block's ancestor chain without recursion.
        child_to_parent = {}
        stack = [self.start_block]
        while stack:
            curr_block = stack.pop()
            if curr_block.hide_from_toc:
                # For now, if the 'hide_from_toc' setting is set on the block, do not traverse down
                # the hierarchy. The reason being is that these blocks may not have human-readable names
                # to display on the mobile clients.
                # Eventually, we'll need to figure out how we want these blocks to be displayed on the
                # mobile clients. As they are still accessible in the browser, just not navigatable
                # from the table-of-contents.
                continue
            if curr_block.location.block_type in self.block_types:
                # Skip blocks the requesting user is not allowed to load.
                if not has_access(self.request.user, 'load', curr_block, course_key=self.course_id):
                    continue
                summary_fn = self.block_types[curr_block.category]
                block_path = list(path(curr_block, child_to_parent, self.start_block))
                unit_url, section_url = find_urls(self.course_id, curr_block, child_to_parent, self.request)
                yield {
                    "path": block_path,
                    "named_path": [b["name"] for b in block_path],
                    "unit_url": unit_url,
                    "section_url": section_url,
                    "summary": summary_fn(self.course_id, curr_block, self.request, self.local_cache)
                }
            if curr_block.has_children:
                children = get_dynamic_descriptor_children(
                    curr_block,
                    create_module,
                    usage_key_filter=parent_or_requested_block_type
                )
                # Push children in reverse so they pop in document order.
                for block in reversed(children):
                    stack.append(block)
                    child_to_parent[block] = curr_block
def path(block, child_to_parent, start_block):
    """Return an iterator of ancestor descriptions for ``block``.

    Walks the ``child_to_parent`` chain from ``block`` up to (and excluding)
    ``start_block``, collecting a name/category/id dict per ancestor, and
    returns them in root-to-leaf order.
    """
    ancestors = []
    current = block
    while current in child_to_parent:
        current = child_to_parent[current]
        if current is start_block:
            # The traversal root is not part of the reported path.
            continue
        ancestors.append({
            # to be consistent with other edx-platform clients, return the defaulted display name
            'name': current.display_name_with_default,
            'category': current.category,
            'id': unicode(current.location)
        })
    return reversed(ancestors)
def find_urls(course_id, block, child_to_parent, request):
    """
    Find the section and unit urls for a block.
    Returns:
        unit_url, section_url:
            unit_url (str): The url of a unit
            section_url (str): The url of a section
    """
    # Collect ancestors from the block up to the traversal root, then reverse
    # so block_list reads [root, chapter, section, unit, ...].
    block_path = []
    while block in child_to_parent:
        block = child_to_parent[block]
        block_path.append(block)
    block_list = list(reversed(block_path))
    block_count = len(block_list)
    chapter_id = block_list[1].location.block_id if block_count > 1 else None
    section = block_list[2] if block_count > 2 else None
    position = None
    if block_count > 3:
        # Courseware positions are 1-based indexes of the unit within its
        # section's children.
        position = 1
        for block in section.children:
            if block.name == block_list[3].url_name:
                break
            position += 1
    kwargs = {'course_id': unicode(course_id)}
    # Fall back to progressively less specific courseware URLs depending on
    # how deep in the hierarchy the block sits.
    if chapter_id is None:
        no_chapter_url = reverse("courseware", kwargs=kwargs, request=request)
        return no_chapter_url, no_chapter_url
    kwargs['chapter'] = chapter_id
    if section is None:
        no_section_url = reverse("courseware_chapter", kwargs=kwargs, request=request)
        return no_section_url, no_section_url
    kwargs['section'] = section.url_name
    if position is None:
        no_position_url = reverse("courseware_section", kwargs=kwargs, request=request)
        return no_position_url, no_position_url
    section_url = reverse("courseware_section", kwargs=kwargs, request=request)
    kwargs['position'] = position
    unit_url = reverse("courseware_position", kwargs=kwargs, request=request)
    return unit_url, section_url
def video_summary(video_profiles, course_id, video_descriptor, request, local_cache):
    """
    Returns a summary dict for the given video module.

    ``video_profiles`` is iterated in priority order to pick the encoded
    video used for the backwards-compatible ``video_url`` field;
    ``local_cache['course_videos']`` holds prefetched VAL data keyed by
    edx_video_id.
    """
    # Fields that are reported regardless of web-only status.
    always_available_data = {
        "name": video_descriptor.display_name,
        "category": video_descriptor.category,
        "id": unicode(video_descriptor.scope_ids.usage_id),
        "only_on_web": video_descriptor.only_on_web,
    }
    if video_descriptor.only_on_web:
        # Web-only videos expose no playable data to mobile clients.
        ret = {
            "video_url": None,
            "video_thumbnail_url": None,
            "duration": 0,
            "size": 0,
            "transcripts": {},
            "language": None,
        }
        ret.update(always_available_data)
        return ret
    # Get encoded videos
    video_data = local_cache['course_videos'].get(video_descriptor.edx_video_id, {})
    # Get highest priority video to populate backwards compatible field
    default_encoded_video = {}
    if video_data:
        for profile in video_profiles:
            default_encoded_video = video_data['profiles'].get(profile, {})
            if default_encoded_video:
                break
    if default_encoded_video:
        video_url = default_encoded_video['url']
    # Then fall back to VideoDescriptor fields for video URLs
    elif video_descriptor.html5_sources:
        video_url = video_descriptor.html5_sources[0]
    else:
        video_url = video_descriptor.source
    # Get duration/size, else default
    duration = video_data.get('duration', None)
    size = default_encoded_video.get('file_size', 0)
    # Build per-language transcript-download URLs without touching the asset
    # store (verify_assets=False).
    transcript_langs = video_descriptor.available_translations(verify_assets=False)
    transcripts = {
        lang: reverse(
            'video-transcripts-detail',
            kwargs={
                'course_id': unicode(course_id),
                'block_id': video_descriptor.scope_ids.usage_id.block_id,
                'lang': lang
            },
            request=request,
        )
        for lang in transcript_langs
    }
    ret = {
        "video_url": video_url,
        "video_thumbnail_url": None,
        "duration": duration,
        "size": size,
        "transcripts": transcripts,
        "language": video_descriptor.get_default_transcript_language(),
        "encoded_videos": video_data.get('profiles')
    }
    ret.update(always_available_data)
    return ret
| agpl-3.0 |
shashank971/edx-platform | cms/djangoapps/contentstore/management/commands/tests/test_import.py | 125 | 4194 | """
Unittests for importing a course via management command
"""
import os
from path import Path as path
import shutil
import tempfile
from django.core.management import call_command
from django_comment_common.utils import are_permissions_roles_seeded
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
class TestImport(ModuleStoreTestCase):
    """
    Unit tests for importing a course from command line
    """
    def create_course_xml(self, content_dir, course_id):
        """Write a minimal OLX course for ``course_id`` into a fresh temp
        directory under ``content_dir`` and return that directory."""
        directory = tempfile.mkdtemp(dir=content_dir)
        os.makedirs(os.path.join(directory, "course"))
        with open(os.path.join(directory, "course.xml"), "w+") as f:
            f.write('<course url_name="{0.run}" org="{0.org}" '
                    'course="{0.course}"/>'.format(course_id))
        with open(os.path.join(directory, "course", "{0.run}.xml".format(course_id)), "w+") as f:
            f.write('<course><chapter name="Test Chapter"></chapter></course>')
        return directory
    def setUp(self):
        """
        Build course XML for importing
        """
        super(TestImport, self).setUp()
        self.content_dir = path(tempfile.mkdtemp())
        self.addCleanup(shutil.rmtree, self.content_dir)
        self.base_course_key = self.store.make_course_key(u'edX', u'test_import_course', u'2013_Spring')
        self.truncated_key = self.store.make_course_key(u'edX', u'test_import', u'2014_Spring')
        # Create good course xml
        self.good_dir = self.create_course_xml(self.content_dir, self.base_course_key)
        # Create course XML where TRUNCATED_COURSE.org == BASE_COURSE_ID.org
        # and BASE_COURSE_ID.startswith(TRUNCATED_COURSE.course)
        self.course_dir = self.create_course_xml(self.content_dir, self.truncated_key)
    def test_forum_seed(self):
        """
        Tests that forum roles were created with import.
        """
        self.assertFalse(are_permissions_roles_seeded(self.base_course_key))
        call_command('import', self.content_dir, self.good_dir)
        self.assertTrue(are_permissions_roles_seeded(self.base_course_key))
    def test_truncated_course_with_url(self):
        """
        Check to make sure an import only blocks true duplicates: new
        courses with similar but not unique org/course combinations aren't
        blocked if the original course's course starts with the new course's
        org/course combinations (i.e. EDx/0.00x/Spring -> EDx/0.00/Spring)
        """
        # Load up base course and verify it is available
        call_command('import', self.content_dir, self.good_dir)
        store = modulestore()
        self.assertIsNotNone(store.get_course(self.base_course_key))
        # Now load up the course with a similar course_id and verify it loads
        call_command('import', self.content_dir, self.course_dir)
        self.assertIsNotNone(store.get_course(self.truncated_key))
    def test_existing_course_with_different_modulestore(self):
        """
        Checks that a course that originally existed in old mongo can be re-imported when
        split is the default modulestore.
        """
        with modulestore().default_store(ModuleStoreEnum.Type.mongo):
            call_command('import', self.content_dir, self.good_dir)
        # Clear out the modulestore mappings, else when the next import command goes to create a destination
        # course_key, it will find the existing course and return the mongo course_key. To reproduce TNL-1362,
        # the destination course_key needs to be the one for split modulestore.
        modulestore().mappings = {}
        with modulestore().default_store(ModuleStoreEnum.Type.split):
            call_command('import', self.content_dir, self.good_dir)
            course = modulestore().get_course(self.base_course_key)
            # With the bug, this fails because the chapter's course_key is the split mongo form,
            # while the course's course_key is the old mongo form.
            self.assertEqual(unicode(course.location.course_key), unicode(course.children[0].course_key))
| agpl-3.0 |
angelblue05/Embytest.Kodi | resources/lib/mutagen/asf/_objects.py | 6 | 14054 | # -*- coding: utf-8 -*-
# Copyright (C) 2005-2006 Joe Wreschnig
# Copyright (C) 2006-2007 Lukas Lalinsky
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
import struct
from mutagen._util import cdata, get_size
from mutagen._compat import text_type, xrange, izip
from mutagen._tags import PaddingInfo
from ._util import guid2bytes, bytes2guid, CODECS, ASFError, ASFHeaderError
from ._attrs import ASFBaseAttribute, ASFUnicodeAttribute
class BaseObject(object):
    """Base ASF object.

    Subclasses set ``GUID`` and register themselves in the shared ``_TYPES``
    mapping via the ``_register`` decorator so parsing can dispatch on the
    GUID found in the stream.
    """
    GUID = None
    # Class-level registry mapping GUID bytes -> object subclass.
    _TYPES = {}
    def __init__(self):
        self.objects = []
        self.data = b""
    def parse(self, asf, data):
        # Default behavior: keep the raw payload so it round-trips unchanged.
        self.data = data
    def render(self, asf):
        # Object layout: 16-byte GUID + 8-byte little-endian total size + payload.
        data = self.GUID + struct.pack("<Q", len(self.data) + 24) + self.data
        return data
    def get_child(self, guid):
        """Return the first child object with the given GUID, or None."""
        for obj in self.objects:
            if obj.GUID == guid:
                return obj
        return None
    @classmethod
    def _register(cls, other):
        """Class decorator: register ``other`` as the handler for its GUID."""
        cls._TYPES[other.GUID] = other
        return other
    @classmethod
    def _get_object(cls, guid):
        """Instantiate the registered handler for ``guid``, or an UnknownObject."""
        if guid in cls._TYPES:
            return cls._TYPES[guid]()
        else:
            return UnknownObject(guid)
    def __repr__(self):
        return "<%s GUID=%s objects=%r>" % (
            type(self).__name__, bytes2guid(self.GUID), self.objects)
    def pprint(self):
        """Return an indented, human-readable tree of this object and children."""
        l = []
        l.append("%s(%s)" % (type(self).__name__, bytes2guid(self.GUID)))
        for o in self.objects:
            for e in o.pprint().splitlines():
                l.append("  " + e)
        return "\n".join(l)
class UnknownObject(BaseObject):
    """Unknown ASF object.

    Used for GUIDs with no registered handler; the inherited parse() keeps
    the raw payload in ``self.data`` so it round-trips unchanged.
    """
    def __init__(self, guid):
        super(UnknownObject, self).__init__()
        assert isinstance(guid, bytes)
        self.GUID = guid
@BaseObject._register
class HeaderObject(BaseObject):
    """ASF header.

    The top-level container; it is parsed/rendered via parse_full/render_full
    rather than the generic parse/render (which raise NotImplementedError).
    """
    GUID = guid2bytes("75B22630-668E-11CF-A6D9-00AA0062CE6C")
    @classmethod
    def parse_full(cls, asf, fileobj):
        """Read the full header and all contained objects from ``fileobj``.

        Raises ASFHeaderError.
        """
        header = cls()
        size, num_objects = cls.parse_size(fileobj)
        for i in xrange(num_objects):
            # Each child: 16-byte GUID + 8-byte size, then (size - 24) payload.
            guid, size = struct.unpack("<16sQ", fileobj.read(24))
            obj = BaseObject._get_object(guid)
            data = fileobj.read(size - 24)
            obj.parse(asf, data)
            header.objects.append(obj)
        return header
    @classmethod
    def parse_size(cls, fileobj):
        """Returns (size, num_objects)
        Raises ASFHeaderError
        """
        header = fileobj.read(30)
        if len(header) != 30 or header[:16] != HeaderObject.GUID:
            raise ASFHeaderError("Not an ASF file.")
        return struct.unpack("<QL", header[16:28])
    def render_full(self, asf, fileobj, available, padding_func):
        """Render the header plus a padding object sized via ``padding_func``."""
        # Render everything except padding
        num_objects = 0
        data = bytearray()
        for obj in self.objects:
            if obj.GUID == PaddingObject.GUID:
                continue
            data += obj.render(asf)
            num_objects += 1
        # calculate how much space we need at least
        padding_obj = PaddingObject()
        header_size = len(HeaderObject.GUID) + 14
        padding_overhead = len(padding_obj.render(asf))
        needed_size = len(data) + header_size + padding_overhead
        # ask the user for padding adjustments
        file_size = get_size(fileobj)
        content_size = file_size - available
        assert content_size >= 0
        info = PaddingInfo(available - needed_size, content_size)
        # add padding
        padding = info._get_padding(padding_func)
        padding_obj.parse(asf, b"\x00" * padding)
        data += padding_obj.render(asf)
        num_objects += 1
        data = (HeaderObject.GUID +
                struct.pack("<QL", len(data) + 30, num_objects) +
                b"\x01\x02" + data)
        return data
    def parse(self, asf, data):
        # Use parse_full() instead; the header is not a regular child object.
        raise NotImplementedError
    def render(self, asf):
        # Use render_full() instead; the header is not a regular child object.
        raise NotImplementedError
@BaseObject._register
class ContentDescriptionObject(BaseObject):
    """Content description.

    Holds the five fixed UTF-16-LE string fields listed in NAMES; parsed
    values are stored into ``asf._tags`` keyed by this object's GUID.
    """
    GUID = guid2bytes("75B22633-668E-11CF-A6D9-00AA0062CE6C")
    # Fixed field order as defined by the object layout.
    NAMES = [
        u"Title",
        u"Author",
        u"Copyright",
        u"Description",
        u"Rating",
    ]
    def parse(self, asf, data):
        super(ContentDescriptionObject, self).parse(asf, data)
        # Five little-endian uint16 byte lengths, then the five strings.
        lengths = struct.unpack("<HHHHH", data[:10])
        texts = []
        pos = 10
        for length in lengths:
            end = pos + length
            if length > 0:
                texts.append(data[pos:end].decode("utf-16-le").strip(u"\x00"))
            else:
                texts.append(None)
            pos = end
        for key, value in izip(self.NAMES, texts):
            if value is not None:
                value = ASFUnicodeAttribute(value=value)
                asf._tags.setdefault(self.GUID, []).append((key, value))
    def render(self, asf):
        def render_text(name):
            # Empty fields are rendered as zero-length (no terminator at all).
            value = asf.to_content_description.get(name)
            if value is not None:
                return text_type(value).encode("utf-16-le") + b"\x00\x00"
            else:
                return b""
        texts = [render_text(x) for x in self.NAMES]
        data = struct.pack("<HHHHH", *map(len, texts)) + b"".join(texts)
        return self.GUID + struct.pack("<Q", 24 + len(data)) + data
@BaseObject._register
class ExtendedContentDescriptionObject(BaseObject):
    """Extended content description.

    A sequence of named, typed attributes; each is decoded by the matching
    ASFBaseAttribute subclass and appended to ``asf._tags``.
    """
    GUID = guid2bytes("D2D0A440-E307-11D2-97F0-00A0C95EA850")
    def parse(self, asf, data):
        super(ExtendedContentDescriptionObject, self).parse(asf, data)
        num_attributes, = struct.unpack("<H", data[0:2])
        pos = 2
        for i in xrange(num_attributes):
            # Entry: name length, UTF-16-LE name, value type, value length, value.
            name_length, = struct.unpack("<H", data[pos:pos + 2])
            pos += 2
            name = data[pos:pos + name_length]
            name = name.decode("utf-16-le").strip("\x00")
            pos += name_length
            value_type, value_length = struct.unpack("<HH", data[pos:pos + 4])
            pos += 4
            value = data[pos:pos + value_length]
            pos += value_length
            attr = ASFBaseAttribute._get_type(value_type)(data=value)
            asf._tags.setdefault(self.GUID, []).append((name, attr))
    def render(self, asf):
        attrs = asf.to_extended_content_description.items()
        data = b"".join(attr.render(name) for (name, attr) in attrs)
        data = struct.pack("<QH", 26 + len(data), len(attrs)) + data
        return self.GUID + data
@BaseObject._register
class FilePropertiesObject(BaseObject):
    """File properties.

    Only the play duration is extracted; per the divisors below, the length
    field is in 100-nanosecond units and preroll in milliseconds (ASF spec).
    """
    GUID = guid2bytes("8CABDCA1-A947-11CF-8EE4-00C00C205365")
    def parse(self, asf, data):
        super(FilePropertiesObject, self).parse(asf, data)
        length, _, preroll = struct.unpack("<QQQ", data[40:64])
        # there are files where preroll is larger than length, limit to >= 0
        asf.info.length = max((length / 10000000.0) - (preroll / 1000.0), 0.0)
@BaseObject._register
class StreamPropertiesObject(BaseObject):
    """Stream properties.

    Pulls channel count, sample rate and bitrate out of the type-specific
    data at the fixed offsets used below.
    """
    GUID = guid2bytes("B7DC0791-A9B7-11CF-8EE6-00C00C205365")
    def parse(self, asf, data):
        super(StreamPropertiesObject, self).parse(asf, data)
        channels, sample_rate, bitrate = struct.unpack("<HII", data[56:66])
        asf.info.channels = channels
        asf.info.sample_rate = sample_rate
        # Stored value is bytes/sec; expose bits/sec.
        asf.info.bitrate = bitrate * 8
@BaseObject._register
class CodecListObject(BaseObject):
    """Codec List.

    Parses codec entries and copies the first audio entry's name/description
    onto ``asf.info``.
    """
    GUID = guid2bytes("86D15240-311D-11D0-A3A4-00A0C90348F6")
    def _parse_entry(self, data, offset):
        """Parse one codec entry starting at ``offset``.

        Returns (next_offset, type, name, description, codec).
        can raise cdata.error
        """
        type_, offset = cdata.uint16_le_from(data, offset)
        units, offset = cdata.uint16_le_from(data, offset)
        # utf-16 code units, not characters..
        next_offset = offset + units * 2
        try:
            name = data[offset:next_offset].decode("utf-16-le").strip("\x00")
        except UnicodeDecodeError:
            name = u""
        offset = next_offset
        units, offset = cdata.uint16_le_from(data, offset)
        next_offset = offset + units * 2
        try:
            desc = data[offset:next_offset].decode("utf-16-le").strip("\x00")
        except UnicodeDecodeError:
            desc = u""
        offset = next_offset
        bytes_, offset = cdata.uint16_le_from(data, offset)
        next_offset = offset + bytes_
        codec = u""
        if bytes_ == 2:
            # A 2-byte codec-information field holds a known codec id.
            codec_id = cdata.uint16_le_from(data, offset)[0]
            if codec_id in CODECS:
                codec = CODECS[codec_id]
        offset = next_offset
        return offset, type_, name, desc, codec
    def parse(self, asf, data):
        super(CodecListObject, self).parse(asf, data)
        # Skip the 16-byte reserved field before the entry count.
        offset = 16
        count, offset = cdata.uint32_le_from(data, offset)
        for i in xrange(count):
            try:
                offset, type_, name, desc, codec = \
                    self._parse_entry(data, offset)
            except cdata.error:
                raise ASFError("invalid codec entry")
            # go with the first audio entry
            if type_ == 2:
                name = name.strip()
                desc = desc.strip()
                asf.info.codec_type = codec
                asf.info.codec_name = name
                asf.info.codec_description = desc
                return
# The following objects carry no extra parsing logic: the inherited
# BaseObject.parse() keeps their raw payload so they round-trip unchanged.
@BaseObject._register
class PaddingObject(BaseObject):
    """Padding object"""
    GUID = guid2bytes("1806D474-CADF-4509-A4BA-9AABCB96AAE8")
@BaseObject._register
class StreamBitratePropertiesObject(BaseObject):
    """Stream bitrate properties"""
    GUID = guid2bytes("7BF875CE-468D-11D1-8D82-006097C9A2B2")
@BaseObject._register
class ContentEncryptionObject(BaseObject):
    """Content encryption"""
    GUID = guid2bytes("2211B3FB-BD23-11D2-B4B7-00A0C955FC6E")
@BaseObject._register
class ExtendedContentEncryptionObject(BaseObject):
    """Extended content encryption"""
    GUID = guid2bytes("298AE614-2622-4C17-B935-DAE07EE9289C")
@BaseObject._register
class HeaderExtensionObject(BaseObject):
    """Header extension.

    A nested container of further objects; children are parsed into
    ``self.objects`` and re-rendered on save.
    """
    GUID = guid2bytes("5FBF03B5-A92E-11CF-8EE3-00C00C205365")
    def parse(self, asf, data):
        super(HeaderExtensionObject, self).parse(asf, data)
        # Bytes 0..17 are fixed fields; the data size lives at offset 18.
        datasize, = struct.unpack("<I", data[18:22])
        datapos = 0
        while datapos < datasize:
            guid, size = struct.unpack(
                "<16sQ", data[22 + datapos:22 + datapos + 24])
            obj = BaseObject._get_object(guid)
            obj.parse(asf, data[22 + datapos + 24:22 + datapos + size])
            self.objects.append(obj)
            datapos += size
    def render(self, asf):
        data = bytearray()
        for obj in self.objects:
            # some files have the padding in the extension header, but we
            # want to add it at the end of the top level header. Just
            # skip padding at this level.
            if obj.GUID == PaddingObject.GUID:
                continue
            data += obj.render(asf)
        # Fixed reserved fields precede the 4-byte data size and the payload.
        return (self.GUID + struct.pack("<Q", 24 + 16 + 6 + len(data)) +
                b"\x11\xD2\xD3\xAB\xBA\xA9\xcf\x11" +
                b"\x8E\xE6\x00\xC0\x0C\x20\x53\x65" +
                b"\x06\x00" + struct.pack("<I", len(data)) + data)
@BaseObject._register
class MetadataObject(BaseObject):
    """Metadata description.

    Like the extended content description, but each attribute also carries a
    stream number; lives inside the header extension.
    """
    GUID = guid2bytes("C5F8CBEA-5BAF-4877-8467-AA8C44FA4CCA")
    def parse(self, asf, data):
        super(MetadataObject, self).parse(asf, data)
        num_attributes, = struct.unpack("<H", data[0:2])
        pos = 2
        for i in xrange(num_attributes):
            (reserved, stream, name_length, value_type,
             value_length) = struct.unpack("<HHHHI", data[pos:pos + 12])
            pos += 12
            name = data[pos:pos + name_length]
            name = name.decode("utf-16-le").strip("\x00")
            pos += name_length
            value = data[pos:pos + value_length]
            pos += value_length
            args = {'data': value, 'stream': stream}
            if value_type == 2:
                # Type 2 values are stored 16 bits wide here, not as a DWORD.
                args['dword'] = False
            attr = ASFBaseAttribute._get_type(value_type)(**args)
            asf._tags.setdefault(self.GUID, []).append((name, attr))
    def render(self, asf):
        attrs = asf.to_metadata.items()
        data = b"".join([attr.render_m(name) for (name, attr) in attrs])
        return (self.GUID + struct.pack("<QH", 26 + len(data), len(attrs)) +
                data)
@BaseObject._register
class MetadataLibraryObject(BaseObject):
    """Metadata library description.

    Like MetadataObject, but attributes additionally carry a language index.
    """
    GUID = guid2bytes("44231C94-9498-49D1-A141-1D134E457054")
    def parse(self, asf, data):
        super(MetadataLibraryObject, self).parse(asf, data)
        num_attributes, = struct.unpack("<H", data[0:2])
        pos = 2
        for i in xrange(num_attributes):
            (language, stream, name_length, value_type,
             value_length) = struct.unpack("<HHHHI", data[pos:pos + 12])
            pos += 12
            name = data[pos:pos + name_length]
            name = name.decode("utf-16-le").strip("\x00")
            pos += name_length
            value = data[pos:pos + value_length]
            pos += value_length
            args = {'data': value, 'language': language, 'stream': stream}
            if value_type == 2:
                # Type 2 values are stored 16 bits wide here, not as a DWORD.
                args['dword'] = False
            attr = ASFBaseAttribute._get_type(value_type)(**args)
            asf._tags.setdefault(self.GUID, []).append((name, attr))
    def render(self, asf):
        # to_metadata_library is already a list of (name, attr) pairs.
        attrs = asf.to_metadata_library
        data = b"".join([attr.render_ml(name) for (name, attr) in attrs])
        return (self.GUID + struct.pack("<QH", 26 + len(data), len(attrs)) +
                data)
| gpl-2.0 |
iulian787/spack | var/spack/repos/builtin/packages/nrm/package.py | 5 | 1074 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Nrm(PythonPackage):
    """Node Resource Manager (NRM): Argonne's node-level resource
    management infrastructure for HPC nodes."""

    homepage = "https://xgitlab.cels.anl.gov/argo/nrm"
    url = "https://www.mcs.anl.gov/research/projects/argo/downloads/nrm-0.1.0.tar.gz"
    version('0.1.0', sha256='911a848042fa50ed216c818e0667bcd3e4219687eb5a35476b7313abe12106dc')
    # Build-only dependency. The original spelled this ``type=('build')``,
    # which is just the string 'build' in redundant parentheses (no comma),
    # not a one-element tuple; write it as a plain string for clarity.
    depends_on('py-setuptools', type='build')
    depends_on('py-six', type=('build', 'run'))
    depends_on('py-pyzmq@17.1.2', type=('build', 'run'))
    depends_on('py-pyyaml', type=('build', 'run'))
    depends_on('py-tornado@5.1.1', type=('build', 'run'))
    depends_on('py-numpy', type=('build', 'run'))
    # argparse is only needed on Python 2.6, where it is not in the stdlib.
    depends_on('py-argparse@1.2.1:', type=('build', 'run'), when='^python@:2.6')
    depends_on('py-jsonschema@2.6.0', type=('build', 'run'))
    depends_on('py-warlock', type=('build', 'run'))
    depends_on('py-scipy', type=('build', 'run'))
| lgpl-2.1 |
ric2b/Vivaldi-browser | chromium/mojo/public/tools/bindings/pylib/mojom_tests/support/run_bindings_generator.py | 2 | 1544 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os.path
from subprocess import check_call
import sys
def RunBindingsGenerator(out_dir, root_dir, mojom_file, extra_flags=None):
  """Run the mojom bindings generator on |mojom_file|.

  Output is written under |out_dir|, mirroring the mojom file's directory
  relative to |root_dir|.

  Args:
    out_dir: Root directory for generated bindings.
    root_dir: Source root; |mojom_file| must live somewhere below it.
    mojom_file: Path to the .mojom file to process.
    extra_flags: Optional list of extra flags for the generator.

  Raises:
    subprocess.CalledProcessError: If the generator exits non-zero.
  """
  out_dir = os.path.abspath(out_dir)
  root_dir = os.path.abspath(root_dir)
  mojom_file = os.path.abspath(mojom_file)
  # The mojom file should be under the root directory somewhere.
  assert mojom_file.startswith(root_dir)
  mojom_reldir = os.path.dirname(os.path.relpath(mojom_file, root_dir))
  # TODO(vtl): Abstract out the "main" functions, so that we can just import
  # the bindings generator (which would be more portable and easier to use in
  # tests).
  this_dir = os.path.dirname(os.path.abspath(__file__))
  # We're in src/mojo/public/tools/bindings/pylib/mojom_tests/support;
  # mojom_bindings_generator.py is in .../bindings.
  bindings_generator = os.path.join(this_dir, os.pardir, os.pardir, os.pardir,
                                    "mojom_bindings_generator.py")
  # Run the generator with the interpreter executing this script rather than
  # whatever "python" happens to be first on PATH.
  args = [sys.executable, bindings_generator,
          "-o", os.path.join(out_dir, mojom_reldir)]
  if extra_flags:
    args.extend(extra_flags)
  args.append(mojom_file)
  check_call(args)
def main(argv):
  """Command-line entry point; returns a process exit code."""
  if len(argv) >= 4:
    RunBindingsGenerator(argv[1], argv[2], argv[3], extra_flags=argv[4:])
    return 0
  # Not enough arguments: show usage and fail.
  print("usage: %s out_dir root_dir mojom_file [extra_flags]" % argv[0])
  return 1
if __name__ == '__main__':
  sys.exit(main(sys.argv))
| bsd-3-clause |
iut-ibk/DynaMind-UrbanSim | 3rdparty/opus/src/urbansim/configurations/residential_land_share_model_configuration_creator.py | 2 | 8336 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.configuration import Configuration
class ResidentialLandShareModelConfigurationCreator(object):
    """Builds the Configuration dict that wires the residential land share
    model into the UrbanSim model pipeline (import/init/prepare/run/estimate
    steps expressed as strings to be evaluated by the framework)."""
    _model_name = 'residential_land_share_model'
    def __init__(self,
                 dataset = 'gridcell',
                 debuglevel = 'debuglevel',
                 coefficients_table = 'residential_land_share_model_coefficients',
                 specification_table = 'residential_land_share_model_specification',
                 estimation_procedure = "opus_core.estimate_linear_regression",
                 input_changed_indices = 'changed_indices'
                 ):
        """Store the (mostly string-expression) knobs used by execute().

        Note: values are variable *names or expressions* evaluated later by
        the framework, not plain values.
        """
        self.dataset = dataset
        self.debuglevel = debuglevel
        self.coefficients_table = coefficients_table
        self.specification_table = specification_table
        self.estimation_procedure = estimation_procedure
        self.input_changed_indices = input_changed_indices
    def execute(self):
        """Return the Configuration describing every step of this model."""
        # Names of intermediate objects used to get data between steps
        # in this model process.
        _coefficients = 'coefficients'
        _specification = 'specification'
        _index = 'index'
        return Configuration({
            'estimate': {
                'arguments': {
                    'data_objects': 'datasets',
                    'dataset': self.dataset,
                    'debuglevel': self.debuglevel,
                    'index': _index,
                    'specification': _specification,
                    'procedure': "'%s'" % self.estimation_procedure
                },
                'output': '(%s, _)' % _coefficients
            },
            'import': {
                'urbansim.models.%s' % self._model_name: 'ResidentialLandShareModel'
            },
            'init': {'name': 'ResidentialLandShareModel'},
            'prepare_for_estimate': {
                'arguments': {
                    'dataset': self.dataset,
                    'specification_storage': 'base_cache_storage',
                    'specification_table': "'%s'" % self.specification_table
                },
                'name': 'prepare_for_estimate',
                'output': '(%s, %s)' % (_specification, _index)
            },
            'prepare_for_run': {
                'arguments': {
                    'coefficients_storage': 'base_cache_storage',
                    'coefficients_table': "'%s'" % self.coefficients_table,
                    'specification_storage': 'base_cache_storage',
                    'specification_table': "'%s'" % self.specification_table
                },
                'name': 'prepare_for_run',
                'output': '(%s, %s)' % (_specification, _coefficients)
            },
            'run': {
                'arguments': {
                    'coefficients': _coefficients,
                    'data_objects': 'datasets',
                    'dataset': self.dataset,
                    'debuglevel': self.debuglevel,
                    'index': self.input_changed_indices,
                    'specification': _specification
                }
            }
        })
from opus_core.tests import opus_unittest
class TestResidentialLandShareModelConfiguration(opus_unittest.OpusTestCase):
    """Checks that the creator emits the expected Configuration dict, both
    with default arguments and with every argument overridden."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_defaults(self):
        """Default-constructed creator produces the documented default config."""
        creator = ResidentialLandShareModelConfigurationCreator()
        expected = Configuration({
            'estimate': {
                'arguments': {
                    'data_objects': 'datasets',
                    'dataset': 'gridcell',
                    'debuglevel': 'debuglevel',
                    'index': 'index',
                    'specification': 'specification',
                    'procedure': "'opus_core.estimate_linear_regression'"
                },
                'output': '(coefficients, _)'
            },
            'import': {
                'urbansim.models.residential_land_share_model': 'ResidentialLandShareModel'
            },
            'init': {'name': 'ResidentialLandShareModel'},
            'prepare_for_estimate': {
                'arguments': {
                    'dataset': 'gridcell',
                    'specification_storage': 'base_cache_storage',
                    'specification_table': "'residential_land_share_model_specification'"
                },
                'name': 'prepare_for_estimate',
                'output': '(specification, index)'
            },
            'prepare_for_run': {
                'arguments': {
                    'coefficients_storage': 'base_cache_storage',
                    'coefficients_table': "'residential_land_share_model_coefficients'",
                    'specification_storage': 'base_cache_storage',
                    'specification_table': "'residential_land_share_model_specification'"
                },
                'name': 'prepare_for_run',
                'output': '(specification, coefficients)'
            },
            'run': {
                'arguments': {
                    'coefficients': 'coefficients',
                    'data_objects': 'datasets',
                    'dataset': 'gridcell',
                    'debuglevel': 'debuglevel',
                    'index': 'changed_indices',
                    'specification': 'specification'
                }
            }
        })
        result = creator.execute()
        self.assertDictsEqual(result, expected)
    def test_with_arguments(self):
        """Every constructor override is reflected in the emitted config."""
        creator = ResidentialLandShareModelConfigurationCreator(
            dataset = 'dataset',
            debuglevel = -5,
            input_changed_indices = 'input_changed_indices',
            coefficients_table = 'coefficients_table',
            specification_table = 'specification_table',
            )
        expected = Configuration({
            'estimate': {
                'arguments': {
                    'data_objects': 'datasets',
                    'dataset': 'dataset',
                    'debuglevel': -5,
                    'index': 'index',
                    'specification': 'specification',
                    'procedure': "'opus_core.estimate_linear_regression'"
                },
                'output': '(coefficients, _)'
            },
            'import': {
                'urbansim.models.residential_land_share_model': 'ResidentialLandShareModel'
            },
            'init': {'name': 'ResidentialLandShareModel'},
            'prepare_for_estimate': {
                'arguments': {
                    'dataset': 'dataset',
                    'specification_storage': 'base_cache_storage',
                    'specification_table': "'specification_table'"
                },
                'name': 'prepare_for_estimate',
                'output': '(specification, index)'
            },
            'prepare_for_run': {
                'arguments': {
                    'coefficients_storage': 'base_cache_storage',
                    'coefficients_table': "'coefficients_table'",
                    'specification_storage': 'base_cache_storage',
                    'specification_table': "'specification_table'"
                },
                'name': 'prepare_for_run',
                'output': '(specification, coefficients)'
            },
            'run': {
                'arguments': {
                    'coefficients': 'coefficients',
                    'data_objects': 'datasets',
                    'dataset': 'dataset',
                    'debuglevel': -5,
                    'index': 'input_changed_indices',
                    'specification': 'specification'
                }
            }
        })
        result = creator.execute()
        self.assertDictsEqual(result, expected)
if __name__ == '__main__':
    opus_unittest.main()
GheRivero/ansible | test/runner/lib/metadata.py | 76 | 4798 | """Test metadata for passing data to delegated tests."""
from __future__ import absolute_import, print_function
import json
from lib.util import (
display,
is_shippable,
)
from lib.diff import (
parse_diff,
FileDiff,
)
class Metadata(object):
    """Metadata object for passing data to delegated tests.

    Serialized to/from JSON via to_file/from_file so the delegated
    environment can reconstruct the same change information.
    """
    def __init__(self):
        """Initialize metadata."""
        self.changes = {}  # type: dict [str, tuple[tuple[int, int]]
        self.cloud_config = None  # type: dict [str, str]
        self.instance_config = None  # type: list[dict[str, str]]
        self.change_description = None  # type: ChangeDescription
        if is_shippable():
            self.ci_provider = 'shippable'
        else:
            self.ci_provider = ''
    def populate_changes(self, diff):
        """Populate ``self.changes`` (path -> changed line ranges) from a diff.

        :type diff: list[str] | None
        """
        patches = parse_diff(diff)
        patches = sorted(patches, key=lambda k: k.new.path)  # type: list [FileDiff]
        self.changes = dict((patch.new.path, tuple(patch.new.ranges)) for patch in patches)
        renames = [patch.old.path for patch in patches if patch.old.path != patch.new.path and patch.old.exists and patch.new.exists]
        deletes = [patch.old.path for patch in patches if not patch.new.exists]
        # make sure old paths which were renamed or deleted are registered in changes
        for path in renames + deletes:
            if path in self.changes:
                # old path was replaced with another file
                continue
            # failed tests involving deleted files should be using line 0 since there is no content remaining
            self.changes[path] = ((0, 0),)
    def to_dict(self):
        """Return a JSON-compatible dict of this metadata.

        NOTE(review): assumes change_description has been set; it is
        serialized unconditionally.

        :rtype: dict[str, any]
        """
        return dict(
            changes=self.changes,
            cloud_config=self.cloud_config,
            instance_config=self.instance_config,
            ci_provider=self.ci_provider,
            change_description=self.change_description.to_dict(),
        )
    def to_file(self, path):
        """Write this metadata as JSON to ``path``.

        :type path: str
        """
        data = self.to_dict()
        display.info('>>> Metadata: %s\n%s' % (path, data), verbosity=3)
        with open(path, 'w') as data_fd:
            json.dump(data, data_fd, sort_keys=True, indent=4)
    @staticmethod
    def from_file(path):
        """Load metadata previously written by to_file().

        :type path: str
        :rtype: Metadata
        """
        with open(path, 'r') as data_fd:
            data = json.load(data_fd)
        return Metadata.from_dict(data)
    @staticmethod
    def from_dict(data):
        """Rebuild a Metadata instance from a to_dict() dictionary.

        :type data: dict[str, any]
        :rtype: Metadata
        """
        metadata = Metadata()
        metadata.changes = data['changes']
        metadata.cloud_config = data['cloud_config']
        metadata.instance_config = data['instance_config']
        metadata.ci_provider = data['ci_provider']
        metadata.change_description = ChangeDescription.from_dict(data['change_description'])
        return metadata
class ChangeDescription(object):
    """Description of changes."""

    # Attributes serialized by to_dict()/from_dict(), in a stable order.
    _FIELDS = (
        'command',
        'changed_paths',
        'deleted_paths',
        'regular_command_targets',
        'focused_command_targets',
        'no_integration_paths',
    )

    def __init__(self):
        self.command = ''  # type: str
        self.changed_paths = []  # type: list[str]
        self.deleted_paths = []  # type: list[str]
        self.regular_command_targets = {}  # type: dict[str, list[str]]
        self.focused_command_targets = {}  # type: dict[str, list[str]]
        self.no_integration_paths = []  # type: list[str]

    @property
    def targets(self):
        """Regular targets for the current command, if any.

        :rtype: list[str] | None
        """
        return self.regular_command_targets.get(self.command)

    @property
    def focused_targets(self):
        """Focused targets for the current command, if any.

        :rtype: list[str] | None
        """
        return self.focused_command_targets.get(self.command)

    def to_dict(self):
        """Serialize this description to a plain dict.

        :rtype: dict[str, any]
        """
        return dict((name, getattr(self, name)) for name in self._FIELDS)

    @staticmethod
    def from_dict(data):
        """Deserialize a description from a dict (inverse of to_dict).

        :param data: dict[str, any]
        :rtype: ChangeDescription
        """
        description = ChangeDescription()

        for name in ChangeDescription._FIELDS:
            setattr(description, name, data[name])

        return description
| gpl-3.0 |
kholidfu/django | django/http/multipartparser.py | 332 | 24331 | """
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
from __future__ import unicode_literals
import base64
import binascii
import cgi
import sys
from django.conf import settings
from django.core.exceptions import SuspiciousMultipartForm
from django.core.files.uploadhandler import (
SkipFile, StopFutureHandlers, StopUpload,
)
from django.utils import six
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import unquote
from django.utils.text import unescape_entities
# Public API of this module.
__all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted')


class MultiPartParserError(Exception):
    """Raised when the multipart request body is malformed or unparsable."""
    pass


class InputStreamExhausted(Exception):
    """
    No more reads are allowed from this device.
    """
    pass
# Markers for the three kinds of parts parse_boundary_stream() can detect:
# a part with no parseable headers, a file upload, or a plain form field.
RAW = "raw"
FILE = "file"
FIELD = "field"

# base64.b64decode raises TypeError on Python 2 but binascii.Error on Python 3.
_BASE64_DECODE_ERROR = TypeError if six.PY2 else binascii.Error
class MultiPartParser(object):
    """
    A rfc2388 multipart/form-data parser.

    ``MultiValueDict.parse()`` reads the input stream in ``chunk_size`` chunks
    and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
    """
    def __init__(self, META, input_data, upload_handlers, encoding=None):
        """
        Initialize the MultiPartParser object.

        :META:
            The standard ``META`` dictionary in Django request objects.
        :input_data:
            The raw post data, as a file-like object.
        :upload_handlers:
            A list of UploadHandler instances that perform operations on the uploaded
            data.
        :encoding:
            The encoding with which to treat the incoming data.

        :raises MultiPartParserError: if the content type, boundary or
            content length are missing or invalid.
        """

        #
        # Content-Type should contain multipart and the boundary information.
        #

        content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
        if not content_type.startswith('multipart/'):
            raise MultiPartParserError('Invalid Content-Type: %s' % content_type)

        # Parse the header to get the boundary to split the parts.
        ctypes, opts = parse_header(content_type.encode('ascii'))
        boundary = opts.get('boundary')
        if not boundary or not cgi.valid_boundary(boundary):
            raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)

        # Content-Length should contain the length of the body we are about
        # to receive.
        try:
            content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH', 0)))
        except (ValueError, TypeError):
            content_length = 0

        if content_length < 0:
            # This means we shouldn't continue...raise an error.
            raise MultiPartParserError("Invalid content length: %r" % content_length)

        if isinstance(boundary, six.text_type):
            boundary = boundary.encode('ascii')
        self._boundary = boundary
        self._input_data = input_data

        # For compatibility with low-level network APIs (with 32-bit integers),
        # the chunk size should be < 2^31, but still divisible by 4.
        possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
        self._chunk_size = min([2 ** 31 - 4] + possible_sizes)

        self._meta = META
        self._encoding = encoding or settings.DEFAULT_CHARSET
        self._content_length = content_length
        self._upload_handlers = upload_handlers

    def parse(self):
        """
        Parse the POST data and break it into a FILES MultiValueDict and a POST
        MultiValueDict.

        Returns a tuple containing the POST and FILES dictionary, respectively.
        """
        # We have to import QueryDict down here to avoid a circular import.
        from django.http import QueryDict

        encoding = self._encoding
        handlers = self._upload_handlers

        # HTTP spec says that Content-Length >= 0 is valid
        # handling content-length == 0 before continuing
        if self._content_length == 0:
            return QueryDict('', encoding=self._encoding), MultiValueDict()

        # See if any of the handlers take care of the parsing.
        # This allows overriding everything if need be.
        for handler in handlers:
            result = handler.handle_raw_input(self._input_data,
                                              self._meta,
                                              self._content_length,
                                              self._boundary,
                                              encoding)
            # Check to see if it was handled
            if result is not None:
                return result[0], result[1]

        # Create the data structures to be used later.
        self._post = QueryDict('', mutable=True)
        self._files = MultiValueDict()

        # Instantiate the parser and stream:
        stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))

        # Whether or not to signal a file-completion at the beginning of the loop.
        old_field_name = None
        counters = [0] * len(handlers)

        try:
            # Each iteration yields one part: item_type is RAW, FIELD or FILE.
            for item_type, meta_data, field_stream in Parser(stream, self._boundary):
                if old_field_name:
                    # We run this at the beginning of the next loop
                    # since we cannot be sure a file is complete until
                    # we hit the next boundary/part of the multipart content.
                    self.handle_file_complete(old_field_name, counters)
                    old_field_name = None

                try:
                    disposition = meta_data['content-disposition'][1]
                    field_name = disposition['name'].strip()
                except (KeyError, IndexError, AttributeError):
                    # Parts without a usable Content-Disposition are skipped.
                    continue

                transfer_encoding = meta_data.get('content-transfer-encoding')
                if transfer_encoding is not None:
                    transfer_encoding = transfer_encoding[0].strip()
                field_name = force_text(field_name, encoding, errors='replace')

                if item_type == FIELD:
                    # This is a post field, we can just set it in the post
                    if transfer_encoding == 'base64':
                        raw_data = field_stream.read()
                        try:
                            data = base64.b64decode(raw_data)
                        except _BASE64_DECODE_ERROR:
                            # Undecodable base64 is passed through as-is.
                            data = raw_data
                    else:
                        data = field_stream.read()

                    self._post.appendlist(field_name,
                                          force_text(data, encoding, errors='replace'))
                elif item_type == FILE:
                    # This is a file, use the handler...
                    file_name = disposition.get('filename')
                    if not file_name:
                        continue
                    file_name = force_text(file_name, encoding, errors='replace')
                    file_name = self.IE_sanitize(unescape_entities(file_name))

                    content_type, content_type_extra = meta_data.get('content-type', ('', {}))
                    content_type = content_type.strip()
                    charset = content_type_extra.get('charset')

                    try:
                        content_length = int(meta_data.get('content-length')[0])
                    except (IndexError, TypeError, ValueError):
                        content_length = None

                    counters = [0] * len(handlers)
                    try:
                        for handler in handlers:
                            try:
                                handler.new_file(field_name, file_name,
                                                 content_type, content_length,
                                                 charset, content_type_extra)
                            except StopFutureHandlers:
                                break

                        for chunk in field_stream:
                            if transfer_encoding == 'base64':
                                # We only special-case base64 transfer encoding
                                # We should always decode base64 chunks by multiple of 4,
                                # ignoring whitespace.

                                stripped_chunk = b"".join(chunk.split())

                                remaining = len(stripped_chunk) % 4
                                while remaining != 0:
                                    over_chunk = field_stream.read(4 - remaining)
                                    stripped_chunk += b"".join(over_chunk.split())
                                    remaining = len(stripped_chunk) % 4

                                try:
                                    chunk = base64.b64decode(stripped_chunk)
                                except Exception as e:
                                    # Since this is only a chunk, any error is an unfixable error.
                                    msg = "Could not decode base64 data: %r" % e
                                    six.reraise(MultiPartParserError, MultiPartParserError(msg), sys.exc_info()[2])

                            for i, handler in enumerate(handlers):
                                chunk_length = len(chunk)
                                chunk = handler.receive_data_chunk(chunk,
                                                                   counters[i])
                                counters[i] += chunk_length
                                if chunk is None:
                                    # If the chunk received by the handler is None, then don't continue.
                                    break

                    except SkipFile:
                        self._close_files()
                        # Just use up the rest of this file...
                        exhaust(field_stream)
                    else:
                        # Handle file upload completions on next iteration.
                        old_field_name = field_name
                else:
                    # If this is neither a FIELD or a FILE, just exhaust the stream.
                    exhaust(stream)
        except StopUpload as e:
            self._close_files()
            if not e.connection_reset:
                exhaust(self._input_data)
        else:
            # Make sure that the request data is all fed
            exhaust(self._input_data)

        # Signal that the upload has completed.
        for handler in handlers:
            retval = handler.upload_complete()
            if retval:
                break

        return self._post, self._files

    def handle_file_complete(self, old_field_name, counters):
        """
        Handle all the signaling that takes place when a file is complete.
        """
        # The first handler to return a file object wins; later handlers
        # are not consulted for this field.
        for i, handler in enumerate(self._upload_handlers):
            file_obj = handler.file_complete(counters[i])
            if file_obj:
                # If it returns a file object, then set the files dict.
                self._files.appendlist(
                    force_text(old_field_name, self._encoding, errors='replace'),
                    file_obj)
                break

    def IE_sanitize(self, filename):
        """Cleanup filename from Internet Explorer full paths."""
        return filename and filename[filename.rfind("\\") + 1:].strip()

    def _close_files(self):
        # Free up all file handles.
        # FIXME: this currently assumes that upload handlers store the file as 'file'
        # We should document that... (Maybe add handler.free_file to complement new_file)
        for handler in self._upload_handlers:
            if hasattr(handler, 'file'):
                handler.file.close()
class LazyStream(six.Iterator):
    """
    The LazyStream wrapper allows one to get and "unget" bytes from a stream.

    Given a producer object (an iterator that yields bytestrings), the
    LazyStream object will support iteration, reading, and keeping a "look-back"
    variable in case you need to "unget" some bytes.
    """
    def __init__(self, producer, length=None):
        """
        Every LazyStream must have a producer when instantiated.

        A producer is an iterable that returns a string each time it
        is called.
        """
        self._producer = producer
        self._empty = False
        self._leftover = b''  # bytes pushed back via unget(), served first
        self.length = length
        self.position = 0  # logical read position, rewound by unget()
        self._remaining = length
        self._unget_history = []  # recent unget sizes, for loop detection

    def tell(self):
        # File-like position reporting.
        return self.position

    def read(self, size=None):
        # Read up to `size` bytes (or everything remaining when size is None).
        def parts():
            remaining = self._remaining if size is None else size
            # do the whole thing in one shot if no limit was provided.
            if remaining is None:
                yield b''.join(self)
                return

            # otherwise do some bookkeeping to return exactly enough
            # of the stream and stashing any extra content we get from
            # the producer
            while remaining != 0:
                assert remaining > 0, 'remaining bytes to read should never go negative'

                try:
                    chunk = next(self)
                except StopIteration:
                    return
                else:
                    emitting = chunk[:remaining]
                    # Excess bytes go back on the stream for later reads.
                    self.unget(chunk[remaining:])
                    remaining -= len(emitting)
                    yield emitting

        out = b''.join(parts())
        return out

    def __next__(self):
        """
        Used when the exact number of bytes to read is unimportant.

        This procedure just returns whatever is chunk is conveniently returned
        from the iterator instead. Useful to avoid unnecessary bookkeeping if
        performance is an issue.
        """
        if self._leftover:
            output = self._leftover
            self._leftover = b''
        else:
            output = next(self._producer)
            # A fresh producer chunk means forward progress, so the
            # unget-loop detector can be reset.
            self._unget_history = []
        self.position += len(output)
        return output

    def close(self):
        """
        Used to invalidate/disable this lazy stream.

        Replaces the producer with an empty list. Any leftover bytes that have
        already been read will still be reported upon read() and/or next().
        """
        self._producer = []

    def __iter__(self):
        return self

    def unget(self, bytes):
        """
        Places bytes back onto the front of the lazy stream.

        Future calls to read() will return those bytes first. The
        stream position and thus tell() will be rewound.
        """
        if not bytes:
            return
        self._update_unget_history(len(bytes))
        self.position -= len(bytes)
        self._leftover = b''.join([bytes, self._leftover])

    def _update_unget_history(self, num_bytes):
        """
        Updates the unget history as a sanity check to see if we've pushed
        back the same number of bytes in one chunk. If we keep ungetting the
        same number of bytes many times (here, 50), we're mostly likely in an
        infinite loop of some sort. This is usually caused by a
        maliciously-malformed MIME request.
        """
        # Keep only the 50 most recent unget sizes.
        self._unget_history = [num_bytes] + self._unget_history[:49]
        number_equal = len([current_number for current_number in self._unget_history
                            if current_number == num_bytes])

        if number_equal > 40:
            raise SuspiciousMultipartForm(
                "The multipart parser got stuck, which shouldn't happen with"
                " normal uploaded files. Check for malicious upload activity;"
                " if there is none, report this to the Django developers."
            )
class ChunkIter(six.Iterator):
    """
    An iterable that will yield chunks of data. Given a file-like object as the
    constructor, this object will yield chunks of read operations from that
    object.

    Iteration ends when the underlying object returns no data or raises
    InputStreamExhausted.
    """
    def __init__(self, flo, chunk_size=64 * 1024):
        self.flo = flo
        self.chunk_size = chunk_size

    def __iter__(self):
        return self

    def __next__(self):
        try:
            data = self.flo.read(self.chunk_size)
        except InputStreamExhausted:
            raise StopIteration()
        if not data:
            # An empty read signals the end of the stream.
            raise StopIteration()
        return data
class InterBoundaryIter(six.Iterator):
    """
    A Producer that will iterate over boundaries.
    """
    def __init__(self, stream, boundary):
        self._stream = stream
        self._boundary = boundary

    def __iter__(self):
        return self

    def __next__(self):
        # Each item is a LazyStream covering one boundary-delimited section;
        # iteration ends when the underlying stream has nothing left.
        try:
            return LazyStream(BoundaryIter(self._stream, self._boundary))
        except InputStreamExhausted:
            raise StopIteration()
class BoundaryIter(six.Iterator):
    """
    A Producer that is sensitive to boundaries.

    Will happily yield bytes until a boundary is found. Will yield the bytes
    before the boundary, throw away the boundary bytes themselves, and push the
    post-boundary bytes back on the stream.

    The future calls to next() after locating the boundary will raise a
    StopIteration exception.
    """

    def __init__(self, stream, boundary):
        self._stream = stream
        self._boundary = boundary
        self._done = False
        # rollback an additional six bytes because the format is like
        # this: CRLF<boundary>[--CRLF]
        self._rollback = len(boundary) + 6

        # Try to use mx fast string search if available. Otherwise
        # use Python find. Wrap the latter for consistency.
        # Probe one byte to detect an already-exhausted stream up front.
        unused_char = self._stream.read(1)
        if not unused_char:
            raise InputStreamExhausted()
        self._stream.unget(unused_char)

    def __iter__(self):
        return self

    def __next__(self):
        if self._done:
            raise StopIteration()

        stream = self._stream
        rollback = self._rollback

        bytes_read = 0
        chunks = []
        # Accumulate slightly more than the rollback window so a boundary
        # split across chunk edges can still be found.
        for bytes in stream:
            bytes_read += len(bytes)
            chunks.append(bytes)
            if bytes_read > rollback:
                break
            if not bytes:
                break
        else:
            self._done = True

        if not chunks:
            raise StopIteration()

        chunk = b''.join(chunks)
        boundary = self._find_boundary(chunk, len(chunk) < self._rollback)

        if boundary:
            end, next = boundary
            # Everything after the boundary belongs to the next part.
            stream.unget(chunk[next:])
            self._done = True
            return chunk[:end]
        else:
            # make sure we don't treat a partial boundary (and
            # its separators) as data
            if not chunk[:-rollback]:  # and len(chunk) >= (len(self._boundary) + 6):
                # There's nothing left, we should just return and mark as done.
                self._done = True
                return chunk
            else:
                stream.unget(chunk[-rollback:])
                return chunk[:-rollback]

    def _find_boundary(self, data, eof=False):
        """
        Finds a multipart boundary in data.

        Should no boundary exist in the data None is returned instead. Otherwise
        a tuple containing the indices of the following are returned:

         * the end of current encapsulation
         * the start of the next encapsulation
        """
        index = data.find(self._boundary)
        if index < 0:
            return None
        else:
            end = index
            next = index + len(self._boundary)
            # backup over CRLF
            last = max(0, end - 1)
            if data[last:last + 1] == b'\n':
                end -= 1
            last = max(0, end - 1)
            if data[last:last + 1] == b'\r':
                end -= 1
            return end, next
def exhaust(stream_or_iterable):
    """
    Completely consume an iterator or stream, discarding the data.

    Raise a MultiPartParserError if the argument is not a stream or an iterable.
    """
    iterator = None
    try:
        iterator = iter(stream_or_iterable)
    except TypeError:
        # Not directly iterable; assume a raw stream and read it in chunks.
        iterator = ChunkIter(stream_or_iterable, 16384)

    if iterator is None:
        raise MultiPartParserError('multipartparser.exhaust() was passed a non-iterable or stream parameter')

    # Drain everything, ignoring the chunks themselves.
    for _unused in iterator:
        pass
def parse_boundary_stream(stream, max_header_size):
    """
    Parses one and exactly one stream that encapsulates a boundary.

    Returns a (part_type, headers_dict, stream) triple where part_type is
    RAW, FIELD or FILE.
    """
    # Stream at beginning of header, look for end of header
    # and parse it if found. The header must fit within one
    # chunk.
    chunk = stream.read(max_header_size)

    # 'find' returns the top of these four bytes, so we'll
    # need to munch them later to prevent them from polluting
    # the payload.
    header_end = chunk.find(b'\r\n\r\n')

    def _parse_header(line):
        # Split "Name: value; params" into (name, (value, params)).
        main_value_pair, params = parse_header(line)
        try:
            name, value = main_value_pair.split(':', 1)
        except ValueError:
            raise ValueError("Invalid header: %r" % line)
        return name, (value, params)

    if header_end == -1:
        # we find no header, so we just mark this fact and pass on
        # the stream verbatim
        stream.unget(chunk)
        return (RAW, {}, stream)

    header = chunk[:header_end]

    # here we place any excess chunk back onto the stream, as
    # well as throwing away the CRLFCRLF bytes from above.
    stream.unget(chunk[header_end + 4:])

    TYPE = RAW
    outdict = {}

    # Eliminate blank lines
    for line in header.split(b'\r\n'):
        # This terminology ("main value" and "dictionary of
        # parameters") is from the Python docs.
        try:
            name, (value, params) = _parse_header(line)
        except ValueError:
            continue

        if name == 'content-disposition':
            TYPE = FIELD
            # A filename parameter means this part is a file upload.
            if params.get('filename'):
                TYPE = FILE

        outdict[name] = value, params

    if TYPE == RAW:
        # No recognizable headers: restore the bytes we consumed.
        stream.unget(chunk)

    return (TYPE, outdict, stream)
class Parser(object):
    """Iterate over the parts of a multipart stream.

    Yields one (part_type, headers, stream) triple per boundary-delimited
    section of the input.
    """
    def __init__(self, stream, boundary):
        self._stream = stream
        self._separator = b'--' + boundary

    def __iter__(self):
        sections = InterBoundaryIter(self._stream, self._separator)
        for section_stream in sections:
            # Iterate over each part
            yield parse_boundary_stream(section_stream, 1024)
def parse_header(line):
    """ Parse the header into a key-value.

    Input (line): bytes, output: unicode for key/name, bytes for value which
    will be decoded later
    """
    plist = _parse_header_params(b';' + line)

    key = plist.pop(0).lower().decode('ascii')
    pdict = {}
    for p in plist:
        i = p.find(b'=')
        if i >= 0:
            has_encoding = False
            name = p[:i].strip().lower().decode('ascii')
            if name.endswith('*'):
                # Lang/encoding embedded in the value (like "filename*=UTF-8''file.ext")
                # http://tools.ietf.org/html/rfc2231#section-4
                name = name[:-1]
                if p.count(b"'") == 2:
                    has_encoding = True
            value = p[i + 1:].strip()
            if has_encoding:
                # RFC 2231 extended value: encoding'lang'percent-encoded-data.
                encoding, lang, value = value.split(b"'")
                if six.PY3:
                    value = unquote(value.decode(), encoding=encoding.decode())
                else:
                    value = unquote(value).decode(encoding)
            if len(value) >= 2 and value[:1] == value[-1:] == b'"':
                # Strip surrounding double quotes and unescape \\ and \".
                value = value[1:-1]
                value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')
            pdict[name] = value
    return key, pdict
def _parse_header_params(s):
plist = []
while s[:1] == b';':
s = s[1:]
end = s.find(b';')
while end > 0 and s.count(b'"', 0, end) % 2:
end = s.find(b';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
plist.append(f.strip())
s = s[end:]
return plist
| bsd-3-clause |
inouire/climage | climage/ls-climage.py | 1 | 3824 | # Copyright 2010-2012 Edouard Garnier de Labareyre
#
# This file is part of climage.
#
# Climage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Climage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Climage. If not, see <http://www.gnu.org/licenses/>.
from colors import *
import sys
import os
import pickle
try:
from PIL import Image
except ImportError:
missingLibMessage()
sys.exit(2)
def completeWithSpaces(s, n):
    """Return `s` fitted to exactly `n` characters.

    Shorter strings are padded with trailing spaces; longer strings are
    truncated. Used to align image names into fixed-width columns.
    """
    # str.ljust pads to width n; the slice truncates anything longer than n.
    return s.ljust(n)[:n]
def createSeparator(w):
    """Return a horizontal separator line of `w` underscore characters."""
    # String repetition replaces the original character-by-character loop.
    return "_" * w
def createNamesString(cli, w, ppl, id):
    """create a string with the list of climages, knowing the terminal width"""
    # Each label "[<index>] <name>" is padded/truncated to one column width.
    col_width = w // ppl
    labels = []
    for offset, cl in enumerate(cli):
        label = "[" + str(id + offset) + "] " + cl[0]
        labels.append(completeWithSpaces(label, col_width))
    return "".join(labels)
def createClimage(pic, w, h):
    """create a climage in memory

    Renders the picture at path `pic` into at most `h` terminal rows of
    width `w`, using ANSI 256-color background escapes, one space per "pixel".
    Returns (pic, lines, height) on success, or the 2-tuple (0, 0) when the
    file cannot be opened as an image or has an unsupported pixel format
    (callers distinguish the cases by tuple length).
    """
    #opening picture
    try:
        im=Image.open(pic)
    except IOError:
        return (0,0)
    S = im.size
    i_W = S[0] #image width
    i_H = S[1] #image height
    t_W = w
    # Horizontal sampling step: one source pixel per two terminal columns,
    # rounded up so the whole image width is covered.
    step = (2*i_W//t_W)
    if step*t_W < i_W*2:
        step+=1
    p_H = i_H//step
    #load image
    pix = im.load()
    #is there an alpha channel?
    try:
        if len(pix[0,0])==4:
            alpha=1
        else:
            alpha=0
    except TypeError:
        # len() failed: pixels are scalars (e.g. palette/grayscale modes),
        # which this renderer does not handle.
        return (0,0)
    #the lines of the picture
    lines=[]
    a=""
    empty=""
    # Build a blank line as wide as one rendered row (IndexError is ignored
    # when sampling runs past the image edge).
    for l in range(t_W//2):
        tmp = 2*l*step//2
        try:
            P=pix[tmp,0]
            empty+=" "
        except IndexError:
            lll=0
    for k in range(h):
        if k >= p_H:
            lines.append(empty)
        else:
            for l in range(t_W//2):
                tmp = 2*l*step//2
                try:
                    P=pix[tmp,k*step]
                    # \x1b[48;5;<n>m sets the background to xterm-256 color n;
                    # getCode3/getCode4 (from colors) map RGB(A) to that index.
                    if alpha==1:
                        a+="\x1b[48;5;"+str(getCode4(P[0],P[1],P[2],P[3]))+"m "
                    else:
                        a+="\x1b[48;5;"+str(getCode3(P[0],P[1],P[2]))+"m "
                except IndexError:
                    lll=0
            # Reset attributes at the end of each rendered row.
            a+="\x1b[0;m"
            lines.append(a)
            a=""
    #compute real height (if less than max one)
    if p_H < h:
        height=p_H
    else:
        height=h
    return (pic,lines,height)
# Script entry point (Python 2): renders every image in the current directory
# as columns of terminal "climages", then saves an id->filename map for other
# tools to look up.
if len(sys.argv)<3:
    print "Some arguments are missing. Expected: pic per line, terminal width"
    exit(1)

#get number of pic per line
ppl = int(sys.argv[1])

#get terminal width
t_W = int(sys.argv[2])

#compute width & height per image
wi = t_W//ppl
hi = 3*wi//8

#list files in this directory
files_list=os.listdir(".")

#create list of tupple (name + climages)
# createClimage returns a 3-tuple on success, a 2-tuple on failure, so the
# length check filters out non-image files; "=" is a progress indicator.
climage_list=[]
for pic in files_list:
    try:
        tmp=createClimage(pic,wi,hi)
        if len(tmp)==3:
            print "=",
            sys.stdout.flush()
            climage_list.append(tmp)
    except IOError:
        lll=0#ie do nothing

if len(climage_list)!=0:
    print ">"
else:
    print "climage can't display anything in this folder."
    sys.exit(1)

#create a sub-tupples list
# Group the climages into rows of at most `ppl` entries each.
n=len(climage_list)//ppl
r=len(climage_list)%ppl
climage_sublist=[]
for i in range(n):
    climage_sublist.append(climage_list[ppl*i:ppl*i+ppl])
if r!=0:
    climage_sublist.append(climage_list[ppl*n:])

#concatenate climages
#print createSeparator(t_W)
id=1
for L in climage_sublist:
    # Row height is the tallest climage in this row (cl[2] is the height).
    max=0
    for cl in L:
        if cl[2]>max:
            max=cl[2]
    # Print the row line by line; shorter climages simply run out of lines
    # (IndexError is swallowed), leaving blank space in their column.
    for k in range(max):
        a=""
        for cl in L:
            try:
                a+=cl[1][k]
            except IndexError:
                lll=0
        print a
    print createNamesString(L,t_W,ppl,id)
    print createSeparator(t_W)
    id+=ppl

#memorize id
# Persist the (index, filename) pairs so a companion tool can map the
# displayed numbers back to files.
k=1
P=[]
for cl in climage_list:
    P.append((k,cl[0]))
    #logfile.write(str(k)+" "+cl[0]+"\n")
    k+=1
logfile = open('/tmp/climage_mem_id', 'w')
pickle.dump(P,logfile)
logfile.close()
| lgpl-3.0 |
phantaminos/uch-ultrasonic-anemometer | software/data_recorder.py | 3 | 1288 | #
# Copyright (C) 2013 UNIVERSIDAD DE CHILE.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
#
# Authors: Luis Alberto Herrera <herrera.luis.alberto@gmail.com>
import argparse
import numpy as np
from scipy.io import netcdf
import adc_reader
# Script body (Python 2): record `repetitions` frames from the ADC and save
# them as a NumPy array at <prefix>.npy.
parser = argparse.ArgumentParser()
parser.add_argument("--repetitions", type=int,
                    help="Number of frames to record.")
parser.add_argument("--prefix",
                    help="Prefix of the files (including output directory.")
args = parser.parse_args()

reader = adc_reader.ADCReader()
# One row per frame, kFrameSize samples per frame; GetNFrames fills it
# in place.
data = np.zeros((args.repetitions, adc_reader.kFrameSize))

print "Recording to " + args.prefix
reader.GetNFrames(data)
# np.save appends the .npy extension to the prefix.
np.save(args.prefix, data)
| gpl-3.0 |
KamranMackey/CloudBot | plugins/wolframalpha.py | 3 | 1863 | import re
import urllib.parse
import requests
from lxml import etree
from cloudbot import hook
from cloudbot.util import web, formatting
# security: disable external entity resolution and network access in lxml so
# hostile XML in API responses cannot trigger XXE-style attacks.
parser = etree.XMLParser(resolve_entities=False, no_network=True)

api_url = 'http://api.wolframalpha.com/v2/query'
# Browser-facing URL template; the query is substituted in URL-encoded form.
query_url = 'http://www.wolframalpha.com/input/?i={}'
@hook.command("wolframalpha", "wa", "calc", "ca", "math", "convert")
def wolframalpha(text, bot):
    """w<query> -- Computes <query> using Wolfram Alpha."""
    api_key = bot.config.get("api_keys", {}).get("wolframalpha", None)

    if not api_key:
        return "error: missing api key"

    params = {
        'input': text,
        'appid': api_key
    }
    request = requests.get(api_url, params=params)

    if request.status_code != requests.codes.ok:
        return "Error getting query: {}".format(request.status_code)

    result = etree.fromstring(request.content, parser=parser)

    # get the URL for a user to view this query in a browser
    short_url = web.try_shorten(query_url.format(urllib.parse.quote_plus(text)))

    # Collect the plaintext of every primary pod (except the echoed input),
    # joining multiple subpod lines with "; " and collapsing whitespace.
    pod_texts = []
    for pod in result.xpath("//pod[@primary='true']"):
        title = pod.attrib['title']
        if pod.attrib['id'] == 'Input':
            continue

        results = []
        for subpod in pod.xpath('subpod/plaintext/text()'):
            subpod = subpod.strip().replace('\\n', '; ')
            subpod = re.sub(r'\s+', ' ', subpod)
            if subpod:
                results.append(subpod)
        if results:
            pod_texts.append(title + ': ' + ', '.join(results))

    ret = ' - '.join(pod_texts)

    if not pod_texts:
        return 'No results.'

    # Strip backslash escapes: r'\\(.)' matches a backslash followed by any
    # character and replaces the pair with just that character.
    ret = re.sub(r'\\(.)', r'\1', ret)

    ret = formatting.truncate(ret, 250)

    if not ret:
        return 'No results.'

    return "{} - {}".format(ret, short_url)
| gpl-3.0 |
mavarick/pyes | tests/test_multifield.py | 5 | 3889 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
from pyes.tests import ESTestCase
from pyes.query import TermQuery
class MultifieldTestCase(ESTestCase):
    """Tests TermQuery against an index whose 'name' field is a multi_field
    (one analyzed sub-field plus an untouched, not-analyzed sub-field)."""

    def setUp(self):
        """Create the index with an explicit mapping and seed test docs."""
        super(MultifieldTestCase, self).setUp()
        # 'name' is mapped as multi_field: 'name' is analyzed for full-text
        # matching, 'untouched' keeps the exact original string.
        mapping = {u'parsedtext': {'boost': 1.0,
                                   'index': 'analyzed',
                                   'store': 'yes',
                                   'type': u'string',
                                   "term_vector": "with_positions_offsets"},
                   u'title': {'boost': 1.0,
                              'index': 'analyzed',
                              'store': 'yes',
                              'type': u'string',
                              "term_vector": "with_positions_offsets"},
                   u'name': {"type": "multi_field",
                             "fields": {
                                 u'name': {
                                     u'boost': 1.0,
                                     u'index': u'analyzed',
                                     u'omit_norms': False,
                                     u'omit_term_freq_and_positions': False,
                                     u'store': u'yes',
                                     "term_vector": "with_positions_offsets",
                                     u'type': u'string'},
                                 u'untouched': {u'boost': 1.0,
                                                u'index': u'not_analyzed',
                                                u'omit_norms': False,
                                                u'omit_term_freq_and_positions': False,
                                                u'store': u'yes',
                                                "term_vector": "no",
                                                u'type': u'string'}
                             }
                   },
                   u'pos': {'store': 'yes',
                            'type': u'integer'},
                   u'uuid': {'boost': 1.0,
                             'index': 'not_analyzed',
                             'store': 'yes',
                             'type': u'string'}}
        self.conn.indices.create_index(self.index_name)
        self.conn.indices.put_mapping(self.document_type, {'properties': mapping}, self.index_name)
        self.conn.index({"name": "Joe Tester", "parsedtext": "Joe Testere nice guy", "uuid": "11111", "position": 1},
                        self.index_name, self.document_type, 1)
        self.conn.index({"name": "Bill Baloney", "parsedtext": "Joe Testere nice guy", "uuid": "22222", "position": 2},
                        self.index_name, self.document_type, 2)
        # Docs with heterogeneous 'value' types (string, int, bool, float).
        self.conn.index({"value": "Joe Tester"}, self.index_name, self.document_type)
        self.conn.index({"value": 123343543536}, self.index_name, self.document_type)
        self.conn.index({"value": True}, self.index_name, self.document_type)
        self.conn.index({"value": 43.32}, self.index_name, self.document_type)
        #self.conn.index({"value": datetime.now()}, self.index_name, self.document_type)
        # Make all indexed docs visible to search before the tests run.
        self.conn.indices.refresh(self.index_name)

    def test_TermQuery(self):
        """TermQuery should match regardless of the boost argument's type,
        and should work against non-string field values too."""
        q = TermQuery("name", "joe")
        resultset = self.conn.search(query=q, indices=self.index_name)
        self.assertEqual(resultset.total, 1)

        # Boost given as int.
        q = TermQuery("name", "joe", 3)
        resultset = self.conn.search(query=q, indices=self.index_name)
        self.assertEqual(resultset.total, 1)

        # Boost given as string.
        q = TermQuery("name", "joe", "3")
        resultset = self.conn.search(query=q, indices=self.index_name)
        self.assertEqual(resultset.total, 1)

        # Term matching against a float field value.
        q = TermQuery("value", 43.32)
        resultset = self.conn.search(query=q, indices=self.index_name)
        self.assertEqual(resultset.total, 1)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| bsd-3-clause |
idlesign/django-sitetree | sitetree/tests/conftest.py | 1 | 4305 | import pytest
from pytest_djangoapp import configure_djangoapp_plugin
def hook(settings):
    """Replace the plain test-app path with its explicit AppConfig path.

    Mutates settings['INSTALLED_APPS'] in place (remove + append, so the
    config entry lands at the end of the list) and returns the settings dict.
    """
    installed = settings['INSTALLED_APPS']
    installed.remove('sitetree.tests.testapp')
    installed.append('sitetree.tests.testapp.conf.MyAppConfig')
    return settings
# Register the pytest-djangoapp plugin, pointing sitetree at the test
# SiteTree subclass and enabling the admin contrib; `hook` rewrites
# INSTALLED_APPS before Django settings are finalized.
pytest_plugins = configure_djangoapp_plugin(
    settings=dict(
        SITETREE_CLS='sitetree.tests.testapp.mysitetree.MySiteTree',
    ),
    admin_contrib=True,
    settings_hook=hook
)
@pytest.fixture
def build_tree():
    """Builds a sitetree from dict definition.

    Returns items indexed by urls.

    Example:

        items_map = build_tree(
            {'alias': 'mytree'},
            [{
                'title': 'one', 'url': '/one/', 'children': [
                    {'title': 'subone', 'url': '/subone/'}
                ]
            }]
        )

    """
    from sitetree.models import Tree, TreeItem
    from django.contrib.auth.models import Permission

    def build(tree_dict, items):
        # Create the Tree from tree_dict, attach every item (recursively),
        # and return a {url: TreeItem} map of everything created.

        def attach_items(tree, items, parent=None):
            # Depth-first: save each item, then recurse into its children.
            for item_dict in items:
                children = item_dict.pop('children', [])
                access_permissions = item_dict.pop('access_permissions', [])

                item = TreeItem(**item_dict)
                item.tree = tree
                item.parent = parent
                item.save()

                for codename in access_permissions:
                    item.access_permissions.add(Permission.objects.get(codename=codename))

                # Index by the item's url directly; the previous f-string
                # wrapper was a redundant str-to-str conversion.
                items_map[item.url] = item

                if children:
                    attach_items(tree, children, parent=item)

        items_map = {}

        tree = Tree(**tree_dict)
        tree.save()

        attach_items(tree, items)

        return items_map

    return build
@pytest.fixture
def common_tree(build_tree):
    """Builds the shared 'mytree' fixture used across tests and returns its
    items indexed by url (plus '' as an alias for the root '/home/' item)."""
    items = build_tree(
        {'alias': 'mytree'},
        [{
            'title': 'Home', 'url': '/home/', 'children': [
                {'title': 'Users', 'url': '/users/', 'children': [
                    {'title': 'Moderators', 'url': '/users/moderators/'},
                    {'title': 'Ordinary', 'url': '/users/ordinary/'},
                    {'title': 'Hidden', 'hidden': True, 'url': '/users/hidden/'},
                ]},
                {'title': 'Articles', 'url': '/articles/', 'children': [
                    {'title': 'About cats', 'url': '/articles/cats/', 'children': [
                        {'title': 'Good', 'url': '/articles/cats/good/'},
                        {'title': 'Bad', 'url': '/articles/cats/bad/'},
                        {'title': 'Ugly', 'url': '/articles/cats/ugly/'},
                    ]},
                    {'title': 'About dogs', 'url': '/articles/dogs/'},
                    {'title': 'About mice', 'inmenu': False, 'url': '/articles/mice/'},
                ]},
                {'title': 'Contacts', 'inbreadcrumbs': False, 'url': '/contacts/', 'children': [
                    {'title': 'Russia', 'url': '/contacts/russia/',
                     'hint': 'The place', 'description': 'Russian Federation', 'children': [
                        {'title': 'Web', 'alias': 'ruweb', 'url': '/contacts/russia/web/', 'children': [
                            # Title contains a template variable to exercise
                            # title rendering.
                            {'title': 'Public {{ subtitle }}', 'url': '/contacts/russia/web/public/'},
                            {'title': 'Private',
                             'url': '/contacts/russia/web/private/',
                             'hint': 'Private Area Hint',
                             'description': 'Private Area Description',
                             },
                        ]},
                        {'title': 'Postal', 'insitetree': False, 'url': '/contacts/russia/postal/'},
                    ]},
                    # urlaspattern entries resolve named URL patterns with
                    # arguments instead of literal paths.
                    {'title': 'Australia', 'urlaspattern': True, 'url': 'contacts_australia australia_var',
                     'children': [
                        {'title': 'Alice Springs', 'access_loggedin': True, 'url': '/contacts/australia/alice/'},
                        {'title': 'Darwin', 'access_guest': True, 'url': '/contacts/australia/darwin/'},
                    ]},
                    {'title': 'China', 'urlaspattern': True, 'url': 'contacts_china china_var'},
                ]},
            ]
        }]
    )
    # Convenience alias: the empty key points at the root item.
    items[''] = items['/home/']
    return items
| bsd-3-clause |
FundersClub/fire | src/firebot/settings/prod.py | 1 | 2942 | import dj_database_url
import os
from firebot.settings.base import * # noqa
###############################################################################
# Django
###############################################################################
ADMIN_URL = os.environ.get('DJANGO_ADMIN_URL')
ALLOWED_HOSTS = os.environ['DJANGO_ALLOWED_HOSTS'].split(',')
CSRF_COOKIE_SECURE = True
DEBUG = os.environ.get('DJANGO_DEBUG') == 'YES'
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
MEDIAFILES_AWS_STORAGE_BUCKET_NAME = os.environ['MEDIAFILES_AWS_STORAGE_BUCKET_NAME']
MEDIAFILES_AWS_ACCESS_KEY_ID = os.environ['MEDIAFILES_AWS_ACCESS_KEY_ID']
MEDIAFILES_AWS_SECRET_ACCESS_KEY = os.environ['MEDIAFILES_AWS_SECRET_ACCESS_KEY']
DEFAULT_FILE_STORAGE = 'firebot.storages.S3MediaFilesStorage'
DATABASES = {
'default': dj_database_url.parse(
os.environ['DATABASE_URL'],
conn_max_age=int(os.environ.get('DJANGO_DB_CONN_MAX_AGE', 0)),
),
}
if os.environ.get('SENTRY_DSN'):
import sentry_sdk
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.django import DjangoIntegration
sentry_sdk.init(
dsn=os.environ.get('SENTRY_DSN'),
release=os.environ.get('HEROKU_SLUG_COMMIT'),
integrations=[CeleryIntegration(), DjangoIntegration()],
traces_sample_rate=1.0,
send_default_pii=True,
attach_stacktrace=True,
)
###############################################################################
# Celery
###############################################################################
CELERY_BROKER_URL = os.environ['REDIS_URL']
###############################################################################
# GitHub
###############################################################################
GITHUB_BOT_USERNAME = os.environ['GITHUB_BOT_USERNAME']
GITHUB_TOKEN = os.environ['GITHUB_TOKEN']
###############################################################################
# Firebot
###############################################################################
BASE_URL = os.environ['FIREBOT_BASE_URL']
CONTACT_URL = os.environ['CONTACT_URL']
PRIVACY_POLICY_URL = os.environ['PRIVACY_POLICY_URL']
TERMS_OF_SERVICE_URL = os.environ['TERMS_OF_SERVICE_URL']
FIREBOT_BANNED_EMAIL_DOMAINS = os.environ['FIREBOT_BANNED_EMAIL_DOMAINS'].split(',')
###############################################################################
# Emails
###############################################################################
EMAIL_BACKEND = 'sendgrid_backend.SendgridBackend'
EMAIL_DOMAIN = os.environ['FIREBOT_EMAIL_DOMAIN']
DEFAULT_FROM_EMAIL = 'bot@' + EMAIL_DOMAIN
SERVER_EMAIL = DEFAULT_FROM_EMAIL
SENDGRID_API_KEY = os.environ['SENDGRID_API_KEY']
SENDGRID_WEBHOOK_SECRET = os.environ['SENDGRID_WEBHOOK_SECRET']
| apache-2.0 |
hpcuantwerpen/easybuild-framework | test/framework/sandbox/easybuild/easyblocks/generic/toolchain.py | 2 | 1427 | ##
# Copyright 2009-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Dummy easyblock for toolchains.
@author: Kenneth Hoste (Ghent University)
"""
from easybuild.framework.easyblock import EasyBlock
class Toolchain(EasyBlock):
"""Dummy support for toolchains."""
def configure_step(self):
pass
def build_step(self):
pass
def install_step(self):
pass
def sanity_check_step(self):
pass
| gpl-2.0 |
jamison904/kernel_jflte_tw | build-all.py | 17 | 9439 | #! /usr/bin/env python
# Copyright (c) 2009-2011, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
#
# Modify for supporting of the Samsung JF targets.
#
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import shutil
import sys
import tarfile
version = 'build-all.py, version 0.01'
build_dir = '../../output/all-kernels'
make_command = ["zImage", "modules"]
make_env = os.environ
pwd = os.environ.get("PWD")
make_env.update({
'ARCH': 'arm',
'CROSS_COMPILE': pwd + '/../prebuilts/gcc/linux-x86/arm/arm-eabi-4.7/bin/arm-eabi-',
'KCONFIG_NOTIMESTAMP': 'true' })
all_options = {}
def error(msg):
    """Report an error on stderr without terminating the build."""
    text = "error: %s\n" % msg
    sys.stderr.write(text)
def fail(msg):
    """Report *msg* via error() and abort with exit status 1."""
    error(msg)
    raise SystemExit(1)
def check_kernel():
    """Abort unless the working directory looks like an MSM kernel tree."""
    looks_like_msm = (os.path.isfile('MAINTAINERS') and
                      os.path.isfile('arch/arm/mach-msm/Kconfig'))
    if not looks_like_msm:
        fail("This doesn't seem to be an MSM kernel dir")
def check_build():
    """Ensure that the build directory is present."""
    # BUGFIX: `errno` was referenced below but never imported at module
    # level, so a failing makedirs() raised NameError instead of being
    # handled.  Import it locally to keep the module interface unchanged.
    import errno
    if not os.path.isdir(build_dir):
        try:
            os.makedirs(build_dir)
        except OSError as exc:
            # Another process creating the directory between the isdir()
            # check and makedirs() is not an error; anything else is.
            if exc.errno == errno.EEXIST:
                pass
            else:
                raise
def update_config(file, str):
    # Append one config line (e.g. "CONFIG_FOO=y") to the given defconfig
    # file.  NOTE(review): the parameter names shadow the builtins `file`
    # and `str`; kept as-is to preserve the original interface.
    print 'Updating %s with \'%s\'\n' % (file, str)
    defconfig = open(file, 'a')
    defconfig.write(str + '\n')
    defconfig.close()
def scan_configs():
    """Map each target's short name to its defconfig path in this tree."""
    patterns = ('arch/arm/configs/jf_???_defconfig',
                'arch/arm/configs/jactive_???_defconfig')
    names = {}
    for pattern in patterns:
        for path in glob.glob(pattern):
            # Strip the trailing "_defconfig" to get the target name.
            target = os.path.basename(path)[:-10]
            names[target] = path
    return names
class Builder:
    """Runs one make invocation, teeing its combined stdout/stderr into a
    log file while showing either the full output or dot-progress on
    stdout (depending on --verbose)."""
    def __init__(self, logname):
        self.logname = logname
        self.fd = open(logname, 'w')
    def run(self, args):
        """Run `args` with the kernel build environment; return its exit code."""
        devnull = open('/dev/null', 'r')
        proc = subprocess.Popen(args, stdin=devnull,
                                env=make_env,
                                bufsize=0,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        count = 0
        # for line in proc.stdout:
        # Read raw chunks from the pipe so output appears promptly even
        # when the child does not flush line-by-line.
        rawfd = proc.stdout.fileno()
        while True:
            line = os.read(rawfd, 1024)
            if not line:
                break
            self.fd.write(line)
            self.fd.flush()
            if all_options.verbose:
                sys.stdout.write(line)
                sys.stdout.flush()
            else:
                # Progress mode: one dot per output line, wrapping the
                # dots every 64 columns.
                for i in range(line.count('\n')):
                    count += 1
                    if count == 64:
                        count = 0
                        print
                    sys.stdout.write('.')
                    sys.stdout.flush()
        print
        result = proc.wait()
        self.fd.close()
        return result
failed_targets = []
def build(target):
    # Configure and build one target: generate its .config from the layered
    # defconfigs, run make, and tar the resulting zImage as boot.img.
    # Failures either abort or are collected in `failed_targets` when
    # --keep-going is set.
    dest_dir = os.path.join(build_dir, target)
    log_name = '%s/log-%s.log' % (build_dir, target)
    zImage_name = '%s/arch/arm/boot/zImage' % (dest_dir)
    tarball_name = '%s/%s.tar' % (build_dir, target)
    print 'Building %s in %s log %s' % (target, dest_dir, log_name)
    if not os.path.isdir(dest_dir):
        os.mkdir(dest_dir)
    defconfig = 'arch/arm/configs/jf_defconfig'
    dotconfig = '%s/.config' % dest_dir
    savedefconfig = '%s/defconfig' % dest_dir
    shutil.copyfile(defconfig, dotconfig)
    devnull = open('/dev/null', 'r')
    # The target-specific variant config is layered on top of the common
    # jf_defconfig via the VARIANT_DEFCONFIG / *_DEFCONFIG make variables.
    subprocess.check_call(['make', 'O=%s' % dest_dir,
                           'VARIANT_DEFCONFIG=%s_defconfig' % target,
                           'DEBUG_DEFCONFIG=jfeng_defconfig',
                           'SELINUX_DEFCONFIG=jfselinux_defconfig',
                           'SELINUX_LOG_DEFCONFIG=jfselinux_log_defconfig',
                           'jf_defconfig'], env=make_env, stdin=devnull)
    devnull.close()
    if not all_options.updateconfigs:
        build = Builder(log_name)
        result = build.run(['make', 'O=%s' % dest_dir] + make_command)
        if result != 0:
            if all_options.keep_going:
                failed_targets.append(target)
                fail_or_error = error
            else:
                fail_or_error = fail
            fail_or_error("Failed to build %s, see %s" % (target, build.logname))
        if result == 0:
            tar = tarfile.open(tarball_name, "w")
            tar.add(zImage_name, arcname='boot.img')
            tar.close()
    # Copy the defconfig back.
    if all_options.configs or all_options.updateconfigs:
        devnull = open('/dev/null', 'r')
        subprocess.check_call(['make', 'O=%s' % dest_dir,
                               'savedefconfig'], env=make_env, stdin=devnull)
        devnull.close()
        shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
    # Build each requested target in turn; with --updateconfigs, append the
    # given option to each target's defconfig first.  Aggregated failures
    # (from --keep-going) are reported at the end.
    print "Building %d target(s)" % len(targets)
    for target in targets:
        if all_options.updateconfigs:
            update_config(allconf[target], all_options.updateconfigs)
        build(target)
    if failed_targets:
        fail('\n '.join(["Failed targets:"] +
            [target for target in failed_targets]))
def main():
    # Entry point: validate the tree, parse command-line options, then
    # dispatch to build_many() for 'all' or an explicit target list.
    global make_command
    check_kernel()
    check_build()
    configs = scan_configs()
    usage = ("""
           %prog [options] all -- Build all targets
           %prog [options] jf_att jf_vzw jf_tmo jf_spr ... -- List specific targets""")
    parser = OptionParser(usage=usage, version=version)
    parser.add_option('--configs', action='store_true',
                      dest='configs',
                      help="Copy configs back into tree")
    parser.add_option('--list', action='store_true',
                      dest='list',
                      help='List available targets')
    parser.add_option('-v', '--verbose', action='store_true',
                      dest='verbose',
                      help='Output to stdout in addition to log file')
    parser.add_option('--oldconfig', action='store_true',
                      dest='oldconfig',
                      help='Only process "make oldconfig"')
    parser.add_option('--updateconfigs',
                      dest='updateconfigs',
                      help="Update defconfigs with provided option setting, "
                           "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
    parser.add_option('-j', '--jobs', type='int', dest="jobs",
                      help="Number of simultaneous jobs")
    parser.add_option('-l', '--load-average', type='int',
                      dest='load_average',
                      help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
    parser.add_option('-k', '--keep-going', action='store_true',
                      dest='keep_going', default=False,
                      help="Keep building other targets if a target fails")
    parser.add_option('-m', '--make-target', action='append',
                      help='Build the indicated make target (default: %s)' %
                           ' '.join(make_command))
    (options, args) = parser.parse_args()
    global all_options
    all_options = options
    if options.list:
        print "Available targets:"
        for target in configs.keys():
            print " %s" % target
        sys.exit(0)
    if options.oldconfig:
        make_command = ["oldconfig"]
    elif options.make_target:
        make_command = options.make_target
    # -j / -l are appended to whatever make target list is in effect.
    if options.jobs:
        make_command.append("-j%d" % options.jobs)
    if options.load_average:
        make_command.append("-l%d" % options.load_average)
    if args == ['all']:
        build_many(configs, configs.keys())
    elif len(args) > 0:
        targets = []
        for t in args:
            if t not in configs.keys():
                parser.error("Target '%s' not one of %s" % (t, configs.keys()))
            targets.append(t)
        build_many(configs, targets)
    else:
        parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
| gpl-2.0 |
yahman72/robotframework | src/robot/reporting/resultwriter.py | 4 | 5657 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.conf import RebotSettings
from robot.errors import DataError
from robot.output import LOGGER
from robot.result import ExecutionResult, Result
from robot.utils import unic
from .jsmodelbuilders import JsModelBuilder
from .logreportwriters import LogWriter, ReportWriter
from .xunitwriter import XUnitWriter
class ResultWriter(object):
    """A class to create log, report, output XML and xUnit files.
    :param sources: Either one :class:`~robot.result.executionresult.Result`
        object, or one or more paths to existing output XML files.
    By default writes ``report.html`` and ``log.html``, but no output XML
    or xUnit files. Custom file names can be given and results disabled
    or enabled using ``settings`` or ``options`` passed to the
    :meth:`write_results` method. The latter is typically more convenient::
        writer = ResultWriter(result)
        writer.write_results(report='custom.html', log=None, xunit='xunit.xml')
    """
    def __init__(self, *sources):
        self._sources = sources
    def write_results(self, settings=None, **options):
        """Writes results based on the given ``settings``  or ``options``.
        :param settings: :class:`~robot.conf.settings.RebotSettings` object
            to configure result writing.
        :param options: Used to construct new
            :class:`~robot.conf.settings.RebotSettings` object if ``settings``
            are not given.
        """
        settings = settings or RebotSettings(options)
        results = Results(settings, *self._sources)
        if settings.output:
            self._write_output(results.result, settings.output)
        if settings.xunit:
            self._write_xunit(results.result, settings.xunit,
                              settings.xunit_skip_noncritical)
        if settings.log:
            config = dict(settings.log_config,
                          minLevel=results.js_result.min_level)
            self._write_log(results.js_result, settings.log, config)
        if settings.report:
            # The report does not need all the data the log does; dropping
            # it first keeps memory usage down.
            results.js_result.remove_data_not_needed_in_report()
            self._write_report(results.js_result, settings.report,
                               settings.report_config)
        return results.return_code
    def _write_output(self, result, path):
        """Save the full execution result as output XML."""
        self._write('Output', result.save, path)
    def _write_xunit(self, result, path, skip_noncritical):
        """Write an xUnit-compatible result file."""
        self._write('XUnit', XUnitWriter(result, skip_noncritical).write, path)
    def _write_log(self, js_result, path, config):
        """Write the HTML log file."""
        self._write('Log', LogWriter(js_result).write, path, config)
    def _write_report(self, js_result, path, config):
        """Write the HTML report file."""
        self._write('Report', ReportWriter(js_result).write, path, config)
    def _write(self, name, writer, path, *args):
        """Run `writer`, logging failures via LOGGER instead of raising."""
        try:
            writer(path, *args)
        except DataError as err:
            LOGGER.error(unicode(err))
        except EnvironmentError as err:
            # `err.filename` can be different than `path` at least if reading
            # log/report templates or writing split log fails.
            # `unic` is needed due to http://bugs.jython.org/issue1825.
            LOGGER.error("Writing %s file '%s' failed: %s: %s" %
                         (name.lower(), path, err.strerror, unic(err.filename)))
        else:
            LOGGER.output_file(name, path)
class Results(object):
    """Lazy holder for an execution result and its JavaScript model.

    Accepts either a single pre-built ``Result`` instance or one or more
    paths to output XML files; in the latter case the files are parsed
    only when :attr:`result` is first accessed.
    """

    def __init__(self, settings, *sources):
        self._settings = settings
        self._sources = sources
        preparsed = len(sources) == 1 and isinstance(sources[0], Result)
        self._result = sources[0] if preparsed else None
        self._prune = not preparsed
        self.return_code = sources[0].return_code if preparsed else -1
        self._js_result = None

    @property
    def result(self):
        """The ``Result`` model, parsed from the sources on first access."""
        if self._result is None:
            settings = self._settings
            include_keywords = bool(settings.log or settings.output)
            self._result = ExecutionResult(include_keywords=include_keywords,
                                           flattened_keywords=settings.flatten_keywords,
                                           merge=settings.merge,
                                           *self._sources)
            self._result.configure(settings.status_rc,
                                   settings.suite_config,
                                   settings.statistics_config)
            self.return_code = self._result.return_code
        return self._result

    @property
    def js_result(self):
        """The JS model built from :attr:`result`, created on first access."""
        if self._js_result is None:
            settings = self._settings
            builder = JsModelBuilder(log_path=settings.log,
                                     split_log=settings.split_log,
                                     prune_input_to_save_memory=self._prune)
            self._js_result = builder.build_from(self.result)
            if self._prune:
                # Free the potentially huge result model once converted.
                self._result = None
        return self._js_result
| apache-2.0 |
mexeniz/django-oscar | src/oscar/apps/catalogue/admin.py | 14 | 3027 | from django.contrib import admin
from treebeard.admin import TreeAdmin
from oscar.core.loading import get_model
AttributeOption = get_model('catalogue', 'AttributeOption')
AttributeOptionGroup = get_model('catalogue', 'AttributeOptionGroup')
Category = get_model('catalogue', 'Category')
Option = get_model('catalogue', 'Option')
Product = get_model('catalogue', 'Product')
ProductAttribute = get_model('catalogue', 'ProductAttribute')
ProductAttributeValue = get_model('catalogue', 'ProductAttributeValue')
ProductCategory = get_model('catalogue', 'ProductCategory')
ProductClass = get_model('catalogue', 'ProductClass')
ProductImage = get_model('catalogue', 'ProductImage')
ProductRecommendation = get_model('catalogue', 'ProductRecommendation')
class AttributeInline(admin.TabularInline):
    # Inline editor for a product's attribute values on the product page.
    model = ProductAttributeValue
class ProductRecommendationInline(admin.TabularInline):
    # Recommendations made *from* this product; 'primary' is the FK on
    # ProductRecommendation pointing back at the recommending product.
    model = ProductRecommendation
    fk_name = 'primary'
class CategoryInline(admin.TabularInline):
    # Product-to-category links, with one spare blank row.
    model = ProductCategory
    extra = 1
class ProductAttributeInline(admin.TabularInline):
    # Attributes defined on a product class, with two spare blank rows.
    model = ProductAttribute
    extra = 2
class ProductClassAdmin(admin.ModelAdmin):
    # Product classes are edited together with their attributes.
    list_display = ('name', 'requires_shipping', 'track_stock')
    inlines = [ProductAttributeInline]
class ProductAdmin(admin.ModelAdmin):
    date_hierarchy = 'date_created'
    list_display = ('get_title', 'upc', 'get_product_class', 'structure',
                    'attribute_summary', 'date_created')
    list_filter = ['structure', 'is_discountable']
    inlines = [AttributeInline, CategoryInline, ProductRecommendationInline]
    prepopulated_fields = {"slug": ("title",)}
    search_fields = ['upc', 'title']
    def get_queryset(self, request):
        # Select/prefetch the related rows the changelist columns touch
        # (product class, parent, attribute summary) so rendering the list
        # does not trigger a query per row.
        qs = super(ProductAdmin, self).get_queryset(request)
        return (
            qs
            .select_related('product_class', 'parent')
            .prefetch_related(
                'attribute_values',
                'attribute_values__attribute'))
class ProductAttributeAdmin(admin.ModelAdmin):
    list_display = ('name', 'code', 'product_class', 'type')
    prepopulated_fields = {"code": ("name", )}
class OptionAdmin(admin.ModelAdmin):
    pass
class ProductAttributeValueAdmin(admin.ModelAdmin):
    list_display = ('product', 'attribute', 'value')
class AttributeOptionInline(admin.TabularInline):
    # Options belonging to an option group, edited inline on the group.
    model = AttributeOption
class AttributeOptionGroupAdmin(admin.ModelAdmin):
    list_display = ('name', 'option_summary')
    inlines = [AttributeOptionInline, ]
class CategoryAdmin(TreeAdmin):
    # django-treebeard's TreeAdmin provides the category tree UI.
    pass
admin.site.register(ProductClass, ProductClassAdmin)
admin.site.register(Product, ProductAdmin)
admin.site.register(ProductAttribute, ProductAttributeAdmin)
admin.site.register(ProductAttributeValue, ProductAttributeValueAdmin)
admin.site.register(AttributeOptionGroup, AttributeOptionGroupAdmin)
admin.site.register(Option, OptionAdmin)
admin.site.register(ProductImage)
admin.site.register(Category, CategoryAdmin)
admin.site.register(ProductCategory)
| bsd-3-clause |
thegricean/sinking-marbles | models/complex_prior/smoothed_unbinned15/scripts/parseUnreliableSpeakerResults.py | 2 | 2762 | import csv
import itertools
import random
import ast
import sys
#usage
# python parseResults.py results.txt
fname = '../results/'+sys.argv[1]
file_names = [fname]
itemfile = open("items.txt")
items = [" ".join(l.rstrip().split()) for l in itemfile.readlines()]
itemfile.close()
#print items
lines = []
results = []
wresults = []
files = [open(fn) for fn in file_names]
for f in files:
lines.extend([l.rstrip() for l in f.readlines()])
#print lines
def getReducedAlternatives(alts):
    """Return an ordered tag string naming which alternative sets occur in alts.

    Each marker substring found in `alts` contributes its numbered tag,
    and the tags are concatenated in fixed numeric order.
    """
    markers = [
        ("some,all,none", "0_basic"),
        ("one,two,three", "1_lownum"),
        ("many", "2_extra"),
        ("eleven", "3_highnum"),
        ("almostall", "4_twowords"),
        ("lessthanhalf", "5_threewords"),
    ]
    tags = [tag for needle, tag in markers if needle in alts]
    return "".join(tags)
headers = ["Item","QUD","State","Alternatives","SpeakerOptimality","PosteriorProbability"]
k = 0
mcnt = 0
condcnt = 0
priorcnt = -1
while k < len(lines):
if lines[k] == "alternatives":
if priorcnt < 89:
priorcnt = priorcnt+1
else:
priorcnt = 0
# mcnt = mcnt + 1
k = k + 1
alts = getReducedAlternatives(lines[k])
k = k + 1
# priors = lines[k].split(",")
k = k + 1
qud = lines[k].split(",")[1]
k = k + 1
spopt = lines[k].split(",")[1]
k = k + 1
pairs = lines[k].split(",,")
# print pairs
# print k
ssize = pairs[0].split(",")
prob = pairs[1].split(",")
for j in range(len(ssize)):
# print priorcnt
# print len(items)
results.append([items[priorcnt],qud, ssize[j], alts, spopt, prob[j]])
k = k + 1
elif lines[k].startswith("speaker-opt"):
spopt = lines[k].split(",")[1]
k = k + 1
pairs = lines[k].split(",,")
#print pairs
ssize = pairs[0].split(",")
prob = pairs[1].split(",")
for j in range(len(ssize)):
results.append([items[priorcnt],qud, ssize[j], alts, spopt, prob[j]])
k = k + 1
elif lines[k].startswith("qud"):
qud = lines[k].split(",")[1]
k = k + 1
spopt = lines[k].split(",")[1]
k = k + 1
pairs = lines[k].split(",,")
#print pairs
ssize = pairs[0].split(",")
prob = pairs[1].split(",")
for j in range(len(ssize)):
results.append([items[priorcnt],qud, ssize[j], alts, spopt, prob[j]])
k = k + 1
else:
#print lines[k]
print "this shouldn't be happening"
print priorcnt
print items[priorcnt]
#print results
for r in results:
inner_dict = dict(zip(headers,r))
wresults.append(inner_dict)
oname = '../results/data/parsed_results.tsv'
w = csv.DictWriter(open(oname, 'wb'),fieldnames=headers,restval="NA",delimiter="\t")
w.writeheader()
w.writerows(wresults)
| mit |
65apps/omim | 3party/freetype/src/tools/docmaker/utils.py | 153 | 3513 | #
# utils.py
#
# Auxiliary functions for the `docmaker' tool (library file).
#
# Copyright 2002-2015 by
# David Turner.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
import string, sys, os, glob, itertools
# current output directory
#
output_dir = None
# A function that generates a sorting key. We want lexicographical order
# (primary key) except that capital letters are sorted before lowercase
# ones (secondary key).
#
# The primary key is implemented by lowercasing the input. The secondary
# key is simply the original data appended, character by character. For
# example, the sort key for `FT_x' is `fFtT__xx', while the sort key for
# `ft_X' is `fftt__xX'. Since ASCII codes of uppercase letters are
# numerically smaller than the codes of lowercase letters, `fFtT__xx' gets
# sorted before `fftt__xX'.
#
def index_key( s ):
    # Interleave the lowercased string with the original, character by
    # character, so capitals sort just before their lowercase forms.
    # NOTE(review): string.join() defaults to a *space* separator, so the
    # actual key for 'FT_x' is 'f F t T _ _ x x' rather than the 'fFtT__xx'
    # described in the comment above; the resulting sort order is the same.
    return string.join( itertools.chain( *zip( s.lower(), s ) ) )
# Sort `input_list', placing the elements of `order_list' in front.
#
def sort_order_list( input_list, order_list ):
    """Return a copy of `order_list` followed by the elements of
       `input_list` not already in it, keeping their original order."""
    new_list = order_list[:]
    # Set membership keeps this O(n) instead of O(n*m); items here are
    # identifier strings, so they are hashable.  (Also renames the loop
    # variable, which used to shadow the builtin `id`.)
    ordered = set( order_list )
    for item in input_list:
        if not item in ordered:
            new_list.append( item )
    return new_list
# Divert standard output to a given project documentation file. Use
# `output_dir' to determine the filename location if necessary and save the
# old stdout handle in a tuple that is returned by this function.
#
def open_output( filename ):
    """Redirect sys.stdout to `filename` (placed inside `output_dir`
       when one is configured) and return a (new_file, old_stdout)
       tuple for a later close_output() call."""
    global output_dir
    if output_dir and output_dir != "":
        filename = output_dir + os.sep + filename
    old_stdout = sys.stdout
    new_file = open( filename, "w" )
    sys.stdout = new_file
    return ( new_file, old_stdout )
# Close the output that was returned by `open_output'.
#
def close_output( output ):
    """Close the file opened by open_output() and restore the saved stdout."""
    new_file, old_stdout = output
    new_file.close()
    sys.stdout = old_stdout
# Check output directory.
#
def check_output():
    """Validate the configured `output_dir`, exiting with status 2 when
       it names something that is not a directory."""
    global output_dir
    if output_dir:
        if output_dir != "":
            if not os.path.isdir( output_dir ):
                sys.stderr.write( "argument"
                                  + " '" + output_dir + "' "
                                  + "is not a valid directory\n" )
                sys.exit( 2 )
        # NOTE(review): this branch is unreachable -- a truthy string can
        # never equal ""; the "" -> None normalisation never happens here.
        else:
            output_dir = None
def file_exists( pathname ):
    """Check that a given file exists and is readable; return 1 on
       success, or None (after a stderr diagnostic) on failure."""
    result = 1
    try:
        file = open( pathname, "r" )
        file.close()
    except IOError:
        # BUGFIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.  Only I/O problems (missing file,
        # bad permissions, ...) should count as "does not exist".
        result = None
        sys.stderr.write( pathname + " couldn't be accessed\n" )
    return result
def make_file_list( args = None ):
    """Build a list of input files from command-line arguments."""
    # Expands glob patterns, preserves argument order, and drops files
    # that cannot be opened.  Returns None when nothing remains.
    # NOTE(review): `string.find` and list-returning `filter` make this
    # Python-2-only code.
    file_list = []
    # sys.stderr.write( repr( sys.argv[1 :] ) + '\n' )
    if not args:
        args = sys.argv[1:]
    for pathname in args:
        if string.find( pathname, '*' ) >= 0:
            newpath = glob.glob( pathname )
            newpath.sort() # sort files -- this is important because
                           # of the order of files
        else:
            newpath = [pathname]
        file_list.extend( newpath )
    if len( file_list ) == 0:
        file_list = None
    else:
        # now filter the file list to remove non-existing ones
        file_list = filter( file_exists, file_list )
    return file_list
# eof
| apache-2.0 |
InfiniteAlpha/profitpy | profit/workbench/portfoliodisplay.py | 18 | 1350 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2007 Troy Melhase <troy@gci.net>
# Distributed under the terms of the GNU General Public License v2
from PyQt4.QtGui import QFrame
from profit.lib import BasicHandler, Signals
from profit.lib.gui import symbolIcon
from profit.workbench.widgets.ui_portfoliodisplay import Ui_PortfolioDisplay
class PortfolioDisplay(QFrame, Ui_PortfolioDisplay, BasicHandler):
    """ PortfolioDisplay -> display the portfolio messages
    """
    def __init__(self, parent=None):
        """ Initializer.
        @param parent ancestor of this object
        """
        QFrame.__init__(self, parent)
        self.setupUi(self)
        self.requestSession()
    def setSession(self, session):
        """ Configures this instance for a session.
        @param session Session instance
        @return None
        """
        self.session = session
        model = session.models.portfolio
        # Give the model a symbol-icon lookup; presumably used for its
        # decoration role -- confirm against the model implementation.
        model.symbolIcon = symbolIcon
        # Re-fit the columns whenever the model is rebuilt from scratch.
        self.connect(model, Signals.modelReset, self.resizeTree)
        self.portfolioView.setModel(model)
    def resizeTree(self):
        """ Resizes all columns in the portfolio tree.
        """
        view = self.portfolioView
        cols = range(view.model().invisibleRootItem.itemCount())
        for col in cols:
            view.resizeColumnToContents(col)
| gpl-2.0 |
thorrak/fermentrack | gravity/signals.py | 1 | 2221 | from django.dispatch import receiver
from django.db.models.signals import post_save, pre_delete
from .models import GravitySensor, TiltConfiguration, TiltGravityCalibrationPoint, TiltTempCalibrationPoint
# The main purpose of these signals at the moment is to trigger reloading of the TiltConfiguration object within the
# tilt_manager.py script whenever there are changes to objects that could alter the Tilt's (or the Tilt manager's)
# behavior
@receiver(post_save, sender=GravitySensor)
def handle_gravitysensor_post_save(sender, **kwargs):
    """
    Trigger anything that should happen on update of GravitySensor
    """
    sensor = kwargs.get('instance')
    if hasattr(sensor, 'tilt_configuration'):
        # Every time we update a GravitySensor we want to trigger a reload of the Tilt configuration in case logging
        # is enabled/disabled. Otherwise, no data will get logged (or data will erroneously continue to be logged)
        try:
            sensor.tilt_configuration.set_redis_reload_flag()
        except Exception:
            # Redis being unreachable is fine -- the reload flag is best
            # effort.  BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            pass
@receiver(post_save, sender=TiltConfiguration)
def handle_tiltconfiguration_post_save(sender, **kwargs):
    """
    Trigger anything that should happen on update of TiltConfiguration
    """
    tilt = kwargs.get('instance')
    try:
        # Best effort: ask the tilt manager to reload this configuration.
        tilt.set_redis_reload_flag()
    except Exception:
        # BUGFIX: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed; an unreachable Redis is OK.
        pass
@receiver(post_save, sender=TiltGravityCalibrationPoint)
def handle_TiltGravityCalibrationPoint_post_save(sender, **kwargs):
    """
    Trigger anything that should happen on update of TiltGravityCalibrationPoint
    """
    calibration_point = kwargs.get('instance')
    try:
        # Best effort: flag the owning sensor's config for reload.
        calibration_point.sensor.set_redis_reload_flag()
    except Exception:
        # BUGFIX: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed; an unreachable Redis is OK.
        pass
@receiver(post_save, sender=TiltTempCalibrationPoint)
def handle_TiltTempCalibrationPoint_post_save(sender, **kwargs):
    """
    Trigger anything that should happen on update of TiltTempCalibrationPoint
    """
    calibration_point = kwargs.get('instance')
    try:
        # Best effort: flag the owning sensor's config for reload.
        calibration_point.sensor.set_redis_reload_flag()
    except Exception:
        # BUGFIX: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed; an unreachable Redis is OK.
        pass
# TODO - Add a pre_delete signal to trigger cessation of the relevant tilt_manager process | mit |
sgt7kor/android_kernel_samsung_m180 | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
| gpl-2.0 |
linearregression/subuser | logic/subuserlib/executablePath.py | 3 | 1841 | #!/usr/bin/env python
# This file should be compatible with both Python 2 and 3.
# If it is not, please file a bug report.
"""
This module provides the usefull function C{which} which allows us to find the full path of a given executable and determine if an executable is present on the given system.
"""
#external imports
import os
#internal imports
#import ...
def isExecutable(fpath):
    """
    Return True when ``fpath`` names an existing regular file that the
    current user is allowed to execute.
    """
    if not os.path.isfile(fpath):
        return False
    return os.access(fpath, os.X_OK)

def which(program):
    """
    @type program: string
    @param program: The short name of the executable. Ex: "vim"
    @rtype: str or None
    @return: The full path of the given executable, or None when it is not
    present on this system.
    """
    directory, _basename = os.path.split(program)
    if directory != '':
        # An explicit path was given; accept it only if it is executable.
        if isExecutable(program):
            return program
    else:
        matches = queryPATH(lambda path: os.path.split(path)[1] == program)
        if matches:
            return matches[0]
    return None

def queryPATH(test):
    """
    Search the PATH for executables.
    Given a predicate taking an absolute filepath, return the full paths of
    every executable on the PATH for which the predicate holds.
    """
    matches = []
    for entry in os.environ["PATH"].split(os.pathsep):
        entry = entry.strip('"')
        if not os.path.exists(entry):
            continue
        for name in os.listdir(entry):
            candidate = os.path.join(entry, name)
            if isExecutable(candidate) and test(candidate):
                matches.append(candidate)
    return matches
| lgpl-3.0 |
zhaodelong/django | django/contrib/gis/db/backends/mysql/introspection.py | 700 | 1771 | from MySQLdb.constants import FIELD_TYPE
from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.mysql.introspection import DatabaseIntrospection
class MySQLIntrospection(DatabaseIntrospection):
    """Database introspection for MySQL with GIS (geometry column) support."""

    # Updating the data_types_reverse dictionary with the appropriate
    # type for Geometry fields, so introspection maps MySQL GEOMETRY
    # columns to Django's GeometryField.
    data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
    data_types_reverse[FIELD_TYPE.GEOMETRY] = 'GeometryField'

    def get_geometry_type(self, table_name, geo_col):
        """
        Return a ``(field_type, field_params)`` tuple describing the Django
        field for the geometry column ``geo_col`` of ``table_name``.

        NOTE(review): if ``geo_col`` is not found in the DESCRIBE output,
        ``field_type`` is never bound and the final return raises
        UnboundLocalError -- presumably callers only pass known geometry
        columns; confirm against call sites.
        """
        cursor = self.connection.cursor()
        try:
            # In order to get the specific geometry type of the field,
            # we introspect on the table definition using `DESCRIBE`.
            cursor.execute('DESCRIBE %s' %
                           self.connection.ops.quote_name(table_name))
            # Increment over description info until we get to the geometry
            # column.
            for column, typ, null, key, default, extra in cursor.fetchall():
                if column == geo_col:
                    # Using OGRGeomType to convert from OGC name to Django field.
                    # MySQL does not support 3D or SRIDs, so the field params
                    # are empty.
                    field_type = OGRGeomType(typ).django
                    field_params = {}
                    break
        finally:
            # Always release the cursor, even if DESCRIBE fails.
            cursor.close()
        return field_type, field_params

    def supports_spatial_index(self, cursor, table_name):
        """Return True when the table's storage engine supports spatial indexes."""
        # Supported with MyISAM, or InnoDB on MySQL 5.7.5+
        storage_engine = self.get_storage_engine(cursor, table_name)
        return (
            (storage_engine == 'InnoDB' and self.connection.mysql_version >= (5, 7, 5)) or
            storage_engine == 'MyISAM'
        )
onethirtyfive/skadi | skadi/protoc/networkbasetypes_pb2.py | 2 | 19683 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: networkbasetypes.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
import google.protobuf.descriptor_pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='networkbasetypes.proto',
package='',
serialized_pb='\n\x16networkbasetypes.proto\x1a google/protobuf/descriptor.proto\"-\n\nCMsgVector\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\x12\t\n\x01z\x18\x03 \x01(\x02\"$\n\x0c\x43MsgVector2D\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\"-\n\nCMsgQAngle\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\x12\t\n\x01z\x18\x03 \x01(\x02\"\xfc\x01\n\x11\x43SVCMsg_GameEvent\x12\x12\n\nevent_name\x18\x01 \x01(\t\x12\x0f\n\x07\x65ventid\x18\x02 \x01(\x05\x12&\n\x04keys\x18\x03 \x03(\x0b\x32\x18.CSVCMsg_GameEvent.key_t\x1a\x99\x01\n\x05key_t\x12\x0c\n\x04type\x18\x01 \x01(\x05\x12\x12\n\nval_string\x18\x02 \x01(\t\x12\x11\n\tval_float\x18\x03 \x01(\x02\x12\x10\n\x08val_long\x18\x04 \x01(\x05\x12\x11\n\tval_short\x18\x05 \x01(\x05\x12\x10\n\x08val_byte\x18\x06 \x01(\x05\x12\x10\n\x08val_bool\x18\x07 \x01(\x08\x12\x12\n\nval_uint64\x18\x08 \x01(\x04\"\x85\x01\n\x16\x43SVCMsgList_GameEvents\x12/\n\x06\x65vents\x18\x01 \x03(\x0b\x32\x1f.CSVCMsgList_GameEvents.event_t\x1a:\n\x07\x65vent_t\x12\x0c\n\x04tick\x18\x01 \x01(\x05\x12!\n\x05\x65vent\x18\x02 \x01(\x0b\x32\x12.CSVCMsg_GameEvent\"9\n\x13\x43SVCMsg_UserMessage\x12\x10\n\x08msg_type\x18\x01 \x01(\x05\x12\x10\n\x08msg_data\x18\x02 \x01(\x0c\"\x8f\x01\n\x18\x43SVCMsgList_UserMessages\x12\x35\n\x08usermsgs\x18\x01 \x03(\x0b\x32#.CSVCMsgList_UserMessages.usermsg_t\x1a<\n\tusermsg_t\x12\x0c\n\x04tick\x18\x01 \x01(\x05\x12!\n\x03msg\x18\x02 \x01(\x0b\x32\x14.CSVCMsg_UserMessage*\xd2\x01\n\x0bSIGNONSTATE\x12\x14\n\x10SIGNONSTATE_NONE\x10\x00\x12\x19\n\x15SIGNONSTATE_CHALLENGE\x10\x01\x12\x19\n\x15SIGNONSTATE_CONNECTED\x10\x02\x12\x13\n\x0fSIGNONSTATE_NEW\x10\x03\x12\x18\n\x14SIGNONSTATE_PRESPAWN\x10\x04\x12\x15\n\x11SIGNONSTATE_SPAWN\x10\x05\x12\x14\n\x10SIGNONSTATE_FULL\x10\x06\x12\x1b\n\x17SIGNONSTATE_CHANGELEVEL\x10\x07')
_SIGNONSTATE = _descriptor.EnumDescriptor(
name='SIGNONSTATE',
full_name='SIGNONSTATE',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='SIGNONSTATE_NONE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SIGNONSTATE_CHALLENGE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SIGNONSTATE_CONNECTED', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SIGNONSTATE_NEW', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SIGNONSTATE_PRESPAWN', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SIGNONSTATE_SPAWN', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SIGNONSTATE_FULL', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SIGNONSTATE_CHANGELEVEL', index=7, number=7,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=789,
serialized_end=999,
)
SIGNONSTATE = enum_type_wrapper.EnumTypeWrapper(_SIGNONSTATE)
SIGNONSTATE_NONE = 0
SIGNONSTATE_CHALLENGE = 1
SIGNONSTATE_CONNECTED = 2
SIGNONSTATE_NEW = 3
SIGNONSTATE_PRESPAWN = 4
SIGNONSTATE_SPAWN = 5
SIGNONSTATE_FULL = 6
SIGNONSTATE_CHANGELEVEL = 7
_CMSGVECTOR = _descriptor.Descriptor(
name='CMsgVector',
full_name='CMsgVector',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='x', full_name='CMsgVector.x', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='y', full_name='CMsgVector.y', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='z', full_name='CMsgVector.z', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=60,
serialized_end=105,
)
_CMSGVECTOR2D = _descriptor.Descriptor(
name='CMsgVector2D',
full_name='CMsgVector2D',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='x', full_name='CMsgVector2D.x', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='y', full_name='CMsgVector2D.y', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=107,
serialized_end=143,
)
_CMSGQANGLE = _descriptor.Descriptor(
name='CMsgQAngle',
full_name='CMsgQAngle',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='x', full_name='CMsgQAngle.x', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='y', full_name='CMsgQAngle.y', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='z', full_name='CMsgQAngle.z', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=145,
serialized_end=190,
)
_CSVCMSG_GAMEEVENT_KEY_T = _descriptor.Descriptor(
name='key_t',
full_name='CSVCMsg_GameEvent.key_t',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='CSVCMsg_GameEvent.key_t.type', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='val_string', full_name='CSVCMsg_GameEvent.key_t.val_string', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='val_float', full_name='CSVCMsg_GameEvent.key_t.val_float', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='val_long', full_name='CSVCMsg_GameEvent.key_t.val_long', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='val_short', full_name='CSVCMsg_GameEvent.key_t.val_short', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='val_byte', full_name='CSVCMsg_GameEvent.key_t.val_byte', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='val_bool', full_name='CSVCMsg_GameEvent.key_t.val_bool', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='val_uint64', full_name='CSVCMsg_GameEvent.key_t.val_uint64', index=7,
number=8, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=292,
serialized_end=445,
)
_CSVCMSG_GAMEEVENT = _descriptor.Descriptor(
name='CSVCMsg_GameEvent',
full_name='CSVCMsg_GameEvent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='event_name', full_name='CSVCMsg_GameEvent.event_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='eventid', full_name='CSVCMsg_GameEvent.eventid', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='keys', full_name='CSVCMsg_GameEvent.keys', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_CSVCMSG_GAMEEVENT_KEY_T, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=193,
serialized_end=445,
)
_CSVCMSGLIST_GAMEEVENTS_EVENT_T = _descriptor.Descriptor(
name='event_t',
full_name='CSVCMsgList_GameEvents.event_t',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tick', full_name='CSVCMsgList_GameEvents.event_t.tick', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='event', full_name='CSVCMsgList_GameEvents.event_t.event', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=523,
serialized_end=581,
)
_CSVCMSGLIST_GAMEEVENTS = _descriptor.Descriptor(
name='CSVCMsgList_GameEvents',
full_name='CSVCMsgList_GameEvents',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='events', full_name='CSVCMsgList_GameEvents.events', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_CSVCMSGLIST_GAMEEVENTS_EVENT_T, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=448,
serialized_end=581,
)
_CSVCMSG_USERMESSAGE = _descriptor.Descriptor(
name='CSVCMsg_UserMessage',
full_name='CSVCMsg_UserMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='msg_type', full_name='CSVCMsg_UserMessage.msg_type', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='msg_data', full_name='CSVCMsg_UserMessage.msg_data', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=583,
serialized_end=640,
)
_CSVCMSGLIST_USERMESSAGES_USERMSG_T = _descriptor.Descriptor(
name='usermsg_t',
full_name='CSVCMsgList_UserMessages.usermsg_t',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tick', full_name='CSVCMsgList_UserMessages.usermsg_t.tick', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='msg', full_name='CSVCMsgList_UserMessages.usermsg_t.msg', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=726,
serialized_end=786,
)
_CSVCMSGLIST_USERMESSAGES = _descriptor.Descriptor(
name='CSVCMsgList_UserMessages',
full_name='CSVCMsgList_UserMessages',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='usermsgs', full_name='CSVCMsgList_UserMessages.usermsgs', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_CSVCMSGLIST_USERMESSAGES_USERMSG_T, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=643,
serialized_end=786,
)
_CSVCMSG_GAMEEVENT_KEY_T.containing_type = _CSVCMSG_GAMEEVENT;
_CSVCMSG_GAMEEVENT.fields_by_name['keys'].message_type = _CSVCMSG_GAMEEVENT_KEY_T
_CSVCMSGLIST_GAMEEVENTS_EVENT_T.fields_by_name['event'].message_type = _CSVCMSG_GAMEEVENT
_CSVCMSGLIST_GAMEEVENTS_EVENT_T.containing_type = _CSVCMSGLIST_GAMEEVENTS;
_CSVCMSGLIST_GAMEEVENTS.fields_by_name['events'].message_type = _CSVCMSGLIST_GAMEEVENTS_EVENT_T
_CSVCMSGLIST_USERMESSAGES_USERMSG_T.fields_by_name['msg'].message_type = _CSVCMSG_USERMESSAGE
_CSVCMSGLIST_USERMESSAGES_USERMSG_T.containing_type = _CSVCMSGLIST_USERMESSAGES;
_CSVCMSGLIST_USERMESSAGES.fields_by_name['usermsgs'].message_type = _CSVCMSGLIST_USERMESSAGES_USERMSG_T
DESCRIPTOR.message_types_by_name['CMsgVector'] = _CMSGVECTOR
DESCRIPTOR.message_types_by_name['CMsgVector2D'] = _CMSGVECTOR2D
DESCRIPTOR.message_types_by_name['CMsgQAngle'] = _CMSGQANGLE
DESCRIPTOR.message_types_by_name['CSVCMsg_GameEvent'] = _CSVCMSG_GAMEEVENT
DESCRIPTOR.message_types_by_name['CSVCMsgList_GameEvents'] = _CSVCMSGLIST_GAMEEVENTS
DESCRIPTOR.message_types_by_name['CSVCMsg_UserMessage'] = _CSVCMSG_USERMESSAGE
DESCRIPTOR.message_types_by_name['CSVCMsgList_UserMessages'] = _CSVCMSGLIST_USERMESSAGES
# Generated message classes: each _message.Message subclass is bound to its
# serialized descriptor above.  ``__metaclass__`` is the Python 2 metaclass
# hook, so this generated module targets Python 2 only.  This file is emitted
# by protoc ("DO NOT EDIT"); regenerate from networkbasetypes.proto instead of
# editing by hand.
class CMsgVector(_message.Message):
  __metaclass__ = _reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _CMSGVECTOR

  # @@protoc_insertion_point(class_scope:CMsgVector)

class CMsgVector2D(_message.Message):
  __metaclass__ = _reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _CMSGVECTOR2D

  # @@protoc_insertion_point(class_scope:CMsgVector2D)

class CMsgQAngle(_message.Message):
  __metaclass__ = _reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _CMSGQANGLE

  # @@protoc_insertion_point(class_scope:CMsgQAngle)

class CSVCMsg_GameEvent(_message.Message):
  __metaclass__ = _reflection.GeneratedProtocolMessageType

  # Nested message type for the repeated "keys" field.
  class key_t(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _CSVCMSG_GAMEEVENT_KEY_T

    # @@protoc_insertion_point(class_scope:CSVCMsg_GameEvent.key_t)
  DESCRIPTOR = _CSVCMSG_GAMEEVENT

  # @@protoc_insertion_point(class_scope:CSVCMsg_GameEvent)

class CSVCMsgList_GameEvents(_message.Message):
  __metaclass__ = _reflection.GeneratedProtocolMessageType

  class event_t(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _CSVCMSGLIST_GAMEEVENTS_EVENT_T

    # @@protoc_insertion_point(class_scope:CSVCMsgList_GameEvents.event_t)
  DESCRIPTOR = _CSVCMSGLIST_GAMEEVENTS

  # @@protoc_insertion_point(class_scope:CSVCMsgList_GameEvents)

class CSVCMsg_UserMessage(_message.Message):
  __metaclass__ = _reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _CSVCMSG_USERMESSAGE

  # @@protoc_insertion_point(class_scope:CSVCMsg_UserMessage)

class CSVCMsgList_UserMessages(_message.Message):
  __metaclass__ = _reflection.GeneratedProtocolMessageType

  class usermsg_t(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _CSVCMSGLIST_USERMESSAGES_USERMSG_T

    # @@protoc_insertion_point(class_scope:CSVCMsgList_UserMessages.usermsg_t)
  DESCRIPTOR = _CSVCMSGLIST_USERMESSAGES

  # @@protoc_insertion_point(class_scope:CSVCMsgList_UserMessages)

# @@protoc_insertion_point(module_scope)
| mit |
empeeu/numpy | numpy/core/numerictypes.py | 18 | 29192 | """
numerictypes: Define the numeric type objects
This module is designed so "from numerictypes import \\*" is safe.
Exported symbols include:
Dictionary with all registered number types (including aliases):
typeDict
Type objects (not all will be available, depends on platform):
see variable sctypes for which ones you have
Bit-width names
int8 int16 int32 int64 int128
uint8 uint16 uint32 uint64 uint128
float16 float32 float64 float96 float128 float256
complex32 complex64 complex128 complex192 complex256 complex512
datetime64 timedelta64
c-based names
bool_
object_
void, str_, unicode_
byte, ubyte,
short, ushort
intc, uintc,
intp, uintp,
int_, uint,
longlong, ulonglong,
single, csingle,
float_, complex_,
longfloat, clongfloat,
As part of the type-hierarchy: xx -- is bit-width
generic
+-> bool_ (kind=b)
+-> number (kind=i)
| integer
| signedinteger (intxx)
| byte
| short
| intc
| intp int0
| int_
| longlong
+-> unsignedinteger (uintxx) (kind=u)
| ubyte
| ushort
| uintc
| uintp uint0
| uint_
| ulonglong
+-> inexact
| +-> floating (floatxx) (kind=f)
| | half
| | single
| | float_ (double)
| | longfloat
| \\-> complexfloating (complexxx) (kind=c)
| csingle (singlecomplex)
| complex_ (cfloat, cdouble)
| clongfloat (longcomplex)
+-> flexible
| character
| void (kind=V)
|
| str_ (string_, bytes_) (kind=S) [Python 2]
| unicode_ (kind=U) [Python 2]
|
| bytes_ (string_) (kind=S) [Python 3]
| str_ (unicode_) (kind=U) [Python 3]
|
\\-> object_ (not used much) (kind=O)
"""
from __future__ import division, absolute_import, print_function
# we add more at the bottom
__all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes',
'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char',
'maximum_sctype', 'issctype', 'typecodes', 'find_common_type',
'issubdtype', 'datetime_data', 'datetime_as_string',
'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar',
]
from numpy.core.multiarray import (
typeinfo, ndarray, array, empty, dtype, datetime_data,
datetime_as_string, busday_offset, busday_count, is_busday,
busdaycalendar
)
import types as _types
import sys
from numpy.compat import bytes, long
import numbers
# we don't export these for import *, but we do want them accessible
# as numerictypes.bool, etc.
if sys.version_info[0] >= 3:
from builtins import bool, int, float, complex, object, str
unicode = str
else:
from __builtin__ import bool, int, float, complex, object, unicode, str
# String-handling utilities to avoid locale-dependence.

# "import string" is costly to import!
# Construct the translation tables directly
#   "A" = chr(65), "a" = chr(97)
_all_chars = [chr(_m) for _m in range(256)]
_ascii_upper = _all_chars[65:65+26]
_ascii_lower = _all_chars[97:97+26]
LOWER_TABLE="".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:])
UPPER_TABLE="".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:])

#import string
# assert (string.maketrans(string.ascii_uppercase, string.ascii_lowercase) == \
#    LOWER_TABLE)
# assert (string.maketrans(string.ascii_lowercase, string.ascii_uppercase) == \
#    UPPER_TABLE)
#LOWER_TABLE = string.maketrans(string.ascii_uppercase, string.ascii_lowercase)
#UPPER_TABLE = string.maketrans(string.ascii_lowercase, string.ascii_uppercase)
def english_lower(s):
    """ Apply English case rules to convert ASCII strings to all lower case.

    This internal helper replaces str.lower() so behavior does not vary with
    the locale.  In particular, Turkish has distinct dotted and dotless
    variants of the Latin letter "I" in both cases, so "I".lower() != "i"
    under a "tr" locale.

    Parameters
    ----------
    s : str

    Returns
    -------
    lowered : str

    Examples
    --------
    >>> from numpy.core.numerictypes import english_lower
    >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
    'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_'
    >>> english_lower('')
    ''
    """
    return s.translate(LOWER_TABLE)
def english_upper(s):
    """ Apply English case rules to convert ASCII strings to all upper case.

    This internal helper replaces str.upper() so behavior does not vary with
    the locale.  In particular, Turkish has distinct dotted and dotless
    variants of the Latin letter "I" in both cases, so "i".upper() != "I"
    under a "tr" locale.

    Parameters
    ----------
    s : str

    Returns
    -------
    uppered : str

    Examples
    --------
    >>> from numpy.core.numerictypes import english_upper
    >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
    'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
    >>> english_upper('')
    ''
    """
    return s.translate(UPPER_TABLE)
def english_capitalize(s):
    """ Apply English case rules to convert the first character of an ASCII
    string to upper case.

    This internal helper replaces str.capitalize() so behavior does not vary
    with the locale.

    Parameters
    ----------
    s : str

    Returns
    -------
    capitalized : str

    Examples
    --------
    >>> from numpy.core.numerictypes import english_capitalize
    >>> english_capitalize('int8')
    'Int8'
    >>> english_capitalize('Int8')
    'Int8'
    >>> english_capitalize('')
    ''
    """
    if not s:
        return s
    return english_upper(s[0]) + s[1:]
sctypeDict = {}      # Contains all leaf-node scalar types with aliases
sctypeNA = {}        # Contains all leaf-node types -> numarray type equivalences
allTypes = {}        # Collect the types we will add to the module here
def _evalname(name):
k = 0
for ch in name:
if ch in '0123456789':
break
k += 1
try:
bits = int(name[k:])
except ValueError:
bits = 0
base = name[:k]
return base, bits
def bitname(obj):
    """Return a bit-width name for a given type object.

    Returns ``(base, bits, char)`` where ``base`` is the kind name ('int',
    'float', ...), ``bits`` is the bit width (0 when not applicable) and
    ``char`` is the dtype character code, suffixed with the byte size when
    both are known (e.g. 'i4').
    """
    name = obj.__name__
    base = ''
    char = ''
    try:
        # Trailing underscore names ('bool_', 'str_', ...) alias the bare
        # C-level name in the typeinfo table.
        if name[-1] == '_':
            newname = name[:-1]
        else:
            newname = name
        info = typeinfo[english_upper(newname)]
        assert(info[-1] == obj)  # sanity check
        bits = info[2]

    except KeyError:     # bit-width name
        base, bits = _evalname(name)
        char = base[0]

    # Fix-ups for types whose char/base cannot be derived from typeinfo or
    # the bit-width parse above.
    if name == 'bool_':
        char = 'b'
        base = 'bool'
    elif name=='void':
        char = 'V'
        base = 'void'
    elif name=='object_':
        char = 'O'
        base = 'object'
        bits = 0
    elif name=='datetime64':
        char = 'M'
    elif name=='timedelta64':
        char = 'm'

    # String/unicode naming differs between Python 2 and 3.
    if sys.version_info[0] >= 3:
        if name=='bytes_':
            char = 'S'
            base = 'bytes'
        elif name=='str_':
            char = 'U'
            base = 'str'
    else:
        if name=='string_':
            char = 'S'
            base = 'string'
        elif name=='unicode_':
            char = 'U'
            base = 'unicode'

    # NOTE: local 'bytes' deliberately shadows the numpy.compat import here;
    # it is the byte size of the type.
    bytes = bits // 8

    if char != '' and bytes != 0:
        char = "%s%d" % (char, bytes)

    return base, bits, char
def _add_types():
    """Populate allTypes/sctypeDict from the C-level typeinfo table."""
    for key, entry in typeinfo.items():
        lname = english_lower(key)
        if isinstance(entry, tuple):
            # Concrete scalar type: register the C-name and also the
            # typechar (entry[0]) and type number (entry[1]) aliases.
            scalar_type = entry[-1]
            allTypes[lname] = scalar_type
            sctypeDict[lname] = scalar_type
            sctypeDict[entry[0]] = scalar_type
            sctypeDict[entry[1]] = scalar_type
        else:
            # Abstract (generic) class: only recorded in allTypes.
            allTypes[lname] = entry
_add_types()
def _add_aliases():
    """Register bit-width aliases ('float64', 'Float64', 'f8', ...) for every
    concrete scalar type in the typeinfo table, filling sctypeDict/sctypeNA.
    """
    for a in typeinfo.keys():
        name = english_lower(a)
        if not isinstance(typeinfo[a], tuple):
            continue
        typeobj = typeinfo[a][-1]

        # insert bit-width version for this class (if relevant)
        base, bit, char = bitname(typeobj)
        # Integer kinds are handled separately by _add_integer_aliases().
        if base[-3:] == 'int' or char[0] in 'ui': continue
        if base != '':
            myname = "%s%d" % (base, bit)
            # longdouble/clongdouble must not clobber an existing alias of
            # the same width (e.g. when long double == double).
            if (name != 'longdouble' and name != 'clongdouble') or \
                   myname not in allTypes.keys():
                allTypes[myname] = typeobj
                sctypeDict[myname] = typeobj
            # numarray-style capitalized names; complex widths are counted
            # per component, hence bit//2.
            if base == 'complex':
                na_name = '%s%d' % (english_capitalize(base), bit//2)
            elif base == 'bool':
                na_name = english_capitalize(base)
                sctypeDict[na_name] = typeobj
            else:
                na_name = "%s%d" % (english_capitalize(base), bit)
                sctypeDict[na_name] = typeobj
            sctypeNA[na_name] = typeobj
            sctypeDict[na_name] = typeobj
            sctypeNA[typeobj] = na_name
            sctypeNA[typeinfo[a][0]] = na_name
        if char != '':
            sctypeDict[char] = typeobj
            sctypeNA[char] = na_name
_add_aliases()
# Integers handled so that
# The int32, int64 types should agree exactly with
# PyArray_INT32, PyArray_INT64 in C
# We need to enforce the same checking as is done
# in arrayobject.h where the order of getting a
# bit-width match is:
# long, longlong, int, short, char
# for int8, int16, int32, int64, int128
def _add_integer_aliases():
    """Register integer aliases ('int32', 'Int32', 'i4', ...).

    The C types are visited widest-match-first (long, longlong, int, short,
    byte) so that e.g. 'int32' binds to the platform's preferred C type of
    that width -- see the ordering comment above.
    """
    _ctypes = ['LONG', 'LONGLONG', 'INT', 'SHORT', 'BYTE']
    for ctype in _ctypes:
        val = typeinfo[ctype]
        bits = val[2]
        charname = 'i%d' % (bits//8,)
        ucharname = 'u%d' % (bits//8,)
        intname = 'int%d' % bits
        UIntname = 'UInt%d' % bits
        Intname = 'Int%d' % bits
        uval = typeinfo['U'+ctype]
        typeobj = val[-1]
        utypeobj = uval[-1]
        # Only the first C type of a given width wins the bit-width names.
        if intname not in allTypes.keys():
            uintname = 'uint%d' % bits
            allTypes[intname] = typeobj
            allTypes[uintname] = utypeobj
            sctypeDict[intname] = typeobj
            sctypeDict[uintname] = utypeobj
            sctypeDict[Intname] = typeobj
            sctypeDict[UIntname] = utypeobj
            sctypeDict[charname] = typeobj
            sctypeDict[ucharname] = utypeobj
            sctypeNA[Intname] = typeobj
            sctypeNA[UIntname] = utypeobj
            sctypeNA[charname] = typeobj
            sctypeNA[ucharname] = utypeobj
        sctypeNA[typeobj] = Intname
        sctypeNA[utypeobj] = UIntname
        sctypeNA[val[0]] = Intname
        sctypeNA[uval[0]] = UIntname
_add_integer_aliases()
# We use these later
void = allTypes['void']
generic = allTypes['generic']
#
# Rework the Python names (so that float and complex and int are consistent
# with Python usage)
#
def _set_up_aliases():
    """Rework the Python-facing names so that float/complex/int aliases are
    consistent with Python usage, then drop aliases that would shadow Python
    builtins or modules.
    """
    type_pairs = [('complex_', 'cdouble'),
                  ('int0', 'intp'),
                  ('uint0', 'uintp'),
                  ('single', 'float'),
                  ('csingle', 'cfloat'),
                  ('singlecomplex', 'cfloat'),
                  ('float_', 'double'),
                  ('intc', 'int'),
                  ('uintc', 'uint'),
                  ('int_', 'long'),
                  ('uint', 'ulong'),
                  ('cfloat', 'cdouble'),
                  ('longfloat', 'longdouble'),
                  ('clongfloat', 'clongdouble'),
                  ('longcomplex', 'clongdouble'),
                  ('bool_', 'bool'),
                  ('unicode_', 'unicode'),
                  ('object_', 'object')]
    # str/bytes naming differs between Python 2 and 3.
    if sys.version_info[0] >= 3:
        type_pairs.extend([('bytes_', 'string'),
                           ('str_', 'unicode'),
                           ('string_', 'string')])
    else:
        type_pairs.extend([('str_', 'string'),
                           ('string_', 'string'),
                           ('bytes_', 'string')])
    for alias, t in type_pairs:
        allTypes[alias] = allTypes[t]
        sctypeDict[alias] = sctypeDict[t]
    # Remove aliases overriding python types and modules
    to_remove = ['ulong', 'object', 'unicode', 'int', 'long', 'float',
                 'complex', 'bool', 'string', 'datetime', 'timedelta']
    if sys.version_info[0] >= 3:
        # Py3K
        to_remove.append('bytes')
        to_remove.append('str')
        to_remove.remove('unicode')
        to_remove.remove('long')
    for t in to_remove:
        try:
            del allTypes[t]
            del sctypeDict[t]
        except KeyError:
            # Some names may not have been registered on this platform.
            pass
_set_up_aliases()
# Now, construct dictionary to lookup character codes from types
_sctype2char_dict = {}
def _construct_char_code_lookup():
    """Build the scalar-type -> typechar mapping from typeinfo, skipping the
    pointer-sized 'p'/'P' pseudo-chars."""
    for entry in typeinfo.values():
        if isinstance(entry, tuple) and entry[0] not in ('p', 'P'):
            _sctype2char_dict[entry[-1]] = entry[0]
_construct_char_code_lookup()
# Scalar types grouped by kind; the numeric lists are filled in by
# _set_array_types() below.
sctypes = {'int': [],
           'uint':[],
           'float':[],
           'complex':[],
           'others':[bool, object, str, unicode, void]}
def _add_array_type(typename, bits):
    """Append allTypes['<typename><bits>'] to sctypes[typename] when that
    bit-width exists on this platform; otherwise do nothing."""
    key = '%s%d' % (typename, bits)
    if key in allTypes:
        sctypes[typename].append(allTypes[key])
def _set_array_types():
    """Fill sctypes with every integer/float/complex width available on this
    platform, and make sure the pointer-sized integer (intp/uintp) is present
    in the integer lists at the position matching its item size.
    """
    ibytes = [1, 2, 4, 8, 16, 32, 64]
    fbytes = [2, 4, 8, 10, 12, 16, 32, 64]
    for bytes in ibytes:
        bits = 8*bytes
        _add_array_type('int', bits)
        _add_array_type('uint', bits)
    for bytes in fbytes:
        bits = 8*bytes
        _add_array_type('float', bits)
        # Complex widths count both components, hence 2*bits.
        _add_array_type('complex', 2*bits)
    # 'p' is the pointer-sized signed integer dtype.
    _gi = dtype('p')
    if _gi.type not in sctypes['int']:
        # Insert intp/uintp keeping the lists sorted by item size.
        indx = 0
        sz = _gi.itemsize
        _lst = sctypes['int']
        while (indx < len(_lst) and sz >= _lst[indx](0).itemsize):
            indx += 1
        sctypes['int'].insert(indx, _gi.type)
        sctypes['uint'].insert(indx, dtype('P').type)
_set_array_types()
genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16',
'int32', 'uint32', 'int64', 'uint64', 'int128',
'uint128', 'float16',
'float32', 'float64', 'float80', 'float96', 'float128',
'float256',
'complex32', 'complex64', 'complex128', 'complex160',
'complex192', 'complex256', 'complex512', 'object']
def maximum_sctype(t):
    """
    Return the scalar type of highest precision of the same kind as the input.

    Parameters
    ----------
    t : dtype or dtype specifier
        The input data type.  This can be a `dtype` object or an object
        that is convertible to a `dtype`.

    Returns
    -------
    out : dtype
        The highest precision data type of the same kind (`dtype.kind`)
        as `t`.  Unrecognized inputs are returned unchanged.

    See Also
    --------
    obj2sctype, mintypecode, sctype2char
    dtype
    """
    sctype = obj2sctype(t)
    if sctype is None:
        # Could not interpret the input as a scalar type: hand it back.
        return t
    base, bits = _evalname(sctype.__name__)
    if bits == 0:
        # Sizeless kinds (e.g. object) have no larger variant.
        return sctype
    # The kind lists in ``sctypes`` are ordered by size; take the largest.
    return sctypes[base][-1]
# Pick the platform's buffer type: BufferType on Py2, memoryview on Py3.
try:
    buffer_type = _types.BufferType
except AttributeError:
    # Py3K
    buffer_type = memoryview
# Maps builtin Python types to the names of their numpy scalar equivalents.
_python_types = {int: 'int_',
                 float: 'float_',
                 complex: 'complex_',
                 bool: 'bool_',
                 bytes: 'bytes_',
                 unicode: 'unicode_',
                 buffer_type: 'void',
                 }
if sys.version_info[0] >= 3:
    def _python_type(t):
        """returns the type corresponding to a certain Python type"""
        # Accept instances as well as types; fall back to object_.
        if not isinstance(t, type):
            t = type(t)
        return allTypes[_python_types.get(t, 'object_')]
else:
    def _python_type(t):
        """returns the type corresponding to a certain Python type"""
        # Py2: old-style instances are not ``type`` instances, so test
        # against types.TypeType instead.
        if not isinstance(t, _types.TypeType):
            t = type(t)
        return allTypes[_python_types.get(t, 'object_')]
def issctype(rep):
    """
    Determines whether the given object represents a scalar data-type.

    Parameters
    ----------
    rep : any
        If `rep` is an instance of a scalar dtype, True is returned. If not,
        False is returned.

    Returns
    -------
    out : bool
        Boolean result of check whether `rep` is a scalar dtype.

    See Also
    --------
    issubsctype, issubdtype, obj2sctype, sctype2char

    Examples
    --------
    >>> np.issctype(np.int32)
    True
    >>> np.issctype(list)
    False
    >>> np.issctype(1.1)
    False

    Strings are also a scalar type:

    >>> np.issctype(np.dtype('str'))
    True
    """
    if not isinstance(rep, (type, dtype)):
        return False
    try:
        res = obj2sctype(rep)
        # object_ is the catch-all fallback, so it does not count as a
        # specific scalar type.
        if res and res != object_:
            return True
        return False
    except Exception:
        # obj2sctype can raise for exotic inputs; treat those as "not a
        # scalar type".  BUGFIX: this was a bare ``except:``, which would
        # also swallow KeyboardInterrupt/SystemExit.
        return False
def obj2sctype(rep, default=None):
    """
    Return the scalar dtype or NumPy equivalent of Python type of an object.

    Parameters
    ----------
    rep : any
        The object of which the type is returned.
    default : any, optional
        If given, this is returned for objects whose types can not be
        determined. If not given, None is returned for those objects.

    Returns
    -------
    dtype : dtype or Python type
        The data type of `rep`.

    See Also
    --------
    sctype2char, issctype, issubsctype, issubdtype, maximum_sctype

    Examples
    --------
    >>> np.obj2sctype(np.int32)
    <type 'numpy.int32'>
    >>> np.obj2sctype(np.array([1., 2.]))
    <type 'numpy.float64'>
    >>> np.obj2sctype(1, default=list)
    <type 'list'>
    """
    try:
        # Already a scalar type object?
        if issubclass(rep, generic):
            return rep
    except TypeError:
        # rep was not a class at all.
        pass
    if isinstance(rep, dtype):
        return rep.type
    if isinstance(rep, type):
        return _python_type(rep)
    if isinstance(rep, ndarray):
        return rep.dtype.type
    # Last resort: let the dtype constructor interpret it.
    try:
        res = dtype(rep)
    except Exception:
        # BUGFIX: narrowed from a bare ``except:`` so that
        # KeyboardInterrupt/SystemExit are not swallowed while probing.
        return default
    return res.type
def issubclass_(arg1, arg2):
    """
    Determine if a class is a subclass of a second class.

    `issubclass_` is equivalent to the Python built-in ``issubclass``,
    except that it returns False instead of raising a TypeError if one
    of the arguments is not a class.

    Parameters
    ----------
    arg1 : class
        Input class. True is returned if `arg1` is a subclass of `arg2`.
    arg2 : class or tuple of classes.
        Input class. If a tuple of classes, True is returned if `arg1` is a
        subclass of any of the tuple elements.

    Returns
    -------
    out : bool
        Whether `arg1` is a subclass of `arg2` or not.

    See Also
    --------
    issubsctype, issubdtype, issctype
    """
    try:
        result = issubclass(arg1, arg2)
    except TypeError:
        # One of the arguments was not a class: report False instead.
        result = False
    return result
def issubsctype(arg1, arg2):
    """
    Determine if the first argument is a subclass of the second argument.

    Both arguments are converted to scalar types via `obj2sctype` before
    the subclass test is applied.

    Parameters
    ----------
    arg1, arg2 : dtype or dtype specifier
        Data-types.

    Returns
    -------
    out : bool
        The result.

    See Also
    --------
    issctype, issubdtype, obj2sctype
    """
    first = obj2sctype(arg1)
    second = obj2sctype(arg2)
    return issubclass(first, second)
def issubdtype(arg1, arg2):
    """
    Returns True if first argument is a typecode lower/equal in type hierarchy.

    Parameters
    ----------
    arg1, arg2 : dtype_like
        dtype or string representing a typecode.

    Returns
    -------
    out : bool

    See Also
    --------
    issubsctype, issubclass_
    numpy.core.numerictypes : Overview of numpy type hierarchy.
    """
    if issubclass_(arg2, generic):
        # arg2 is already a scalar type: compare directly.
        return issubclass(dtype(arg1).type, arg2)
    # Otherwise compare against the nearest scalar base class in the MRO
    # of arg2's scalar type (falling back to the type itself).
    mro = dtype(arg2).type.mro()
    target = mro[1] if len(mro) > 1 else mro[0]
    return issubclass(dtype(arg1).type, target)
# This dictionary allows look up based on any alias for an array data-type
class _typedict(dict):
    """
    Base object for a dictionary for look-up with any alias for an array dtype.
    Instances of `_typedict` can not be used as dictionaries directly,
    first they have to be populated.
    """
    def __getitem__(self, obj):
        # Normalize the key through obj2sctype so any dtype alias
        # (type object, char code, dtype instance, ...) hits the entry.
        return dict.__getitem__(self, obj2sctype(obj))
# Per-scalar-type lookup tables, populated by _construct_lookups() below.
nbytes = _typedict()      # item size in bytes
_alignment = _typedict()  # required alignment
_maxvals = _typedict()    # maximum representable value (or None)
_minvals = _typedict()    # minimum representable value (or None)
def _construct_lookups():
    # Fill nbytes/_alignment/_maxvals/_minvals from the ``typeinfo`` table.
    # NOTE(review): relies on the typeinfo tuple layout — val[2] bits,
    # val[3] alignment, val[4]/val[5] max/min (when present), val[-1] the
    # scalar type object; confirm against the typeinfo producer.
    for name, val in typeinfo.items():
        if not isinstance(val, tuple):
            # Non-tuple entries are aliases; skip them.
            continue
        obj = val[-1]
        nbytes[obj] = val[2] // 8
        _alignment[obj] = val[3]
        if (len(val) > 5):
            _maxvals[obj] = val[4]
            _minvals[obj] = val[5]
        else:
            # Unbounded/sizeless types have no min/max.
            _maxvals[obj] = None
            _minvals[obj] = None
_construct_lookups()
def sctype2char(sctype):
    """
    Return the string representation of a scalar dtype.

    Parameters
    ----------
    sctype : scalar dtype or object
        If a scalar dtype, the corresponding string character is
        returned. If an object, `sctype2char` tries to infer its scalar
        type and then return the corresponding string character.

    Returns
    -------
    typechar : str
        The string character corresponding to the scalar type.

    Raises
    ------
    ValueError
        If `sctype` is an object for which the type can not be inferred.

    See Also
    --------
    obj2sctype, issctype, issubsctype, mintypecode
    """
    inferred = obj2sctype(sctype)
    if inferred is None:
        raise ValueError("unrecognized type")
    # _sctype2char_dict maps scalar type objects to their char codes.
    return _sctype2char_dict[inferred]
# Create dictionary of casting functions that wrap sequences
# indexed by type or type character
cast = _typedict()
# ScalarType: the Python-level scalar types plus every numpy scalar type.
try:
    ScalarType = [_types.IntType, _types.FloatType, _types.ComplexType,
                  _types.LongType, _types.BooleanType,
                  _types.StringType, _types.UnicodeType, _types.BufferType]
except AttributeError:
    # Py3K: the Py2-only type names are gone; use builtins instead.
    ScalarType = [int, float, complex, int, bool, bytes, str, memoryview]
ScalarType.extend(_sctype2char_dict.keys())
ScalarType = tuple(ScalarType)
for key in _sctype2char_dict.keys():
    # ``k=key`` freezes the loop variable so each lambda casts to its own
    # type (avoids the late-binding-closure pitfall).
    cast[key] = lambda x, k=key : array(x, copy=False).astype(k)
# Create the typestring lookup dictionary
_typestr = _typedict()
for key in _sctype2char_dict.keys():
    if issubclass(key, allTypes['flexible']):
        # Flexible (sized) types keep just their char code.
        _typestr[key] = _sctype2char_dict[key]
    else:
        # Otherwise derive "<char><size>" from an actual dtype instance.
        _typestr[key] = empty((1,), key).dtype.str[1:]
# Make sure all typestrings are in sctypeDict
for key, val in _typestr.items():
    if val not in sctypeDict:
        sctypeDict[val] = key
# Add additional strings to the sctypeDict
if sys.version_info[0] >= 3:
    _toadd = ['int', 'float', 'complex', 'bool', 'object',
              'str', 'bytes', 'object', ('a', allTypes['bytes_'])]
else:
    # Py2: 'str' maps to the 8-bit string type; 'a' is its alias.
    _toadd = ['int', 'float', 'complex', 'bool', 'object', 'string',
              ('str', allTypes['string_']),
              'unicode', 'object', ('a', allTypes['string_'])]
for name in _toadd:
    if isinstance(name, tuple):
        # Explicit (alias, type) pair.
        sctypeDict[name[0]] = name[1]
    else:
        # Plain name: alias for the underscore-suffixed scalar type.
        sctypeDict[name] = allTypes['%s_' % name]
del _toadd, name
# Now add the types we've determined to this module
for key in allTypes:
    globals()[key] = allTypes[key]
    __all__.append(key)
del key
# Groups of dtype character codes by category, used e.g. by can_cast
# and user code to test a dtype's kind.
typecodes = {'Character':'c',
             'Integer':'bhilqp',
             'UnsignedInteger':'BHILQP',
             'Float':'efdg',
             'Complex':'FDG',
             'AllInteger':'bBhHiIlLqQpP',
             'AllFloat':'efdgFDG',
             'Datetime': 'Mm',
             'All':'?bhilqpBHILQPefdgFDGSUVOMm'}
# backwards compatibility --- deprecated name
typeDict = sctypeDict
typeNA = sctypeNA
# b -> boolean
# u -> unsigned integer
# i -> signed integer
# f -> floating point
# c -> complex
# M -> datetime
# m -> timedelta
# S -> string
# U -> Unicode string
# V -> record
# O -> Python object
# dtype.kind characters in coercion order (see key above).
_kind_list = ['b', 'u', 'i', 'f', 'c', 'S', 'U', 'V', 'O', 'M', 'm']
# Candidate codes scanned by _can_coerce_all; drops 'p'/'P' (the [:-2])
# since they alias other integer codes.
__test_types = '?'+typecodes['AllInteger'][:-2]+typecodes['AllFloat']+'O'
__len_test_types = len(__test_types)
# Keep incrementing until a common type both can be coerced to
# is found. Otherwise, return None
def _find_common_coerce(a, b):
    """Return a common dtype both ``a`` and ``b`` coerce to, or None.

    Scans the __test_types ladder starting at ``a``'s own position.
    """
    if a > b:
        return a
    try:
        start = __test_types.index(a.char)
    except ValueError:
        # ``a`` is not on the ladder (e.g. flexible type): no answer.
        return None
    return _can_coerce_all([a, b], start=start)
# Find a data-type that all data-types in a list can be coerced to
def _can_coerce_all(dtypelist, start=0):
    """Find a dtype every entry of ``dtypelist`` can be coerced to.

    Candidates are scanned in __test_types order beginning at ``start``;
    returns None when the list is empty or no candidate fits.
    """
    count = len(dtypelist)
    if count == 0:
        return None
    if count == 1:
        return dtypelist[0]
    for pos in range(start, __len_test_types):
        candidate = dtype(__test_types[pos])
        if all(candidate >= x for x in dtypelist):
            return candidate
    return None
def _register_types():
    # Register the numpy abstract scalar types as virtual subclasses of
    # the matching ``numbers`` ABCs so isinstance checks interoperate.
    numbers.Integral.register(integer)
    numbers.Complex.register(inexact)
    numbers.Real.register(floating)
_register_types()
def find_common_type(array_types, scalar_types):
    """
    Determine common type following standard coercion rules.

    Parameters
    ----------
    array_types : sequence
        A list of dtypes or dtype convertible objects representing arrays.
    scalar_types : sequence
        A list of dtypes or dtype convertible objects representing scalars.

    Returns
    -------
    datatype : dtype
        The common data type, which is the maximum of `array_types`
        ignoring `scalar_types`, unless the maximum of `scalar_types` is
        of a different kind (`dtype.kind`).  If the kind is not
        understood, then None is returned.

    See Also
    --------
    dtype, common_type, can_cast, mintypecode

    Examples
    --------
    >>> np.find_common_type([np.int64, np.float32], [])
    dtype('float64')
    >>> np.find_common_type(['f4', 'f4', 'i4'], ['c8'])
    dtype('complex128')
    """
    max_array = _can_coerce_all([dtype(t) for t in array_types])
    max_scalar = _can_coerce_all([dtype(t) for t in scalar_types])
    if max_array is None:
        return max_scalar
    if max_scalar is None:
        return max_array
    try:
        rank_array = _kind_list.index(max_array.kind)
        rank_scalar = _kind_list.index(max_scalar.kind)
    except ValueError:
        # Unknown kind character.
        return None
    if rank_scalar > rank_array:
        # Scalars of a fundamentally different (higher) kind may up-cast
        # the array type; otherwise the array type wins.
        return _find_common_coerce(max_scalar, max_array)
    return max_array
| bsd-3-clause |
cortedeltimo/SickRage | lib/future/utils/__init__.py | 36 | 20238 | """
A selection of cross-compatible functions for Python 2 and 3.
This module exports useful functions for 2/3 compatible code:
* bind_method: binds functions to classes
* ``native_str_to_bytes`` and ``bytes_to_native_str``
* ``native_str``: always equal to the native platform string object (because
this may be shadowed by imports from future.builtins)
* lists: lrange(), lmap(), lzip(), lfilter()
* iterable method compatibility:
- iteritems, iterkeys, itervalues
- viewitems, viewkeys, viewvalues
These use the original method if available, otherwise they use items,
keys, values.
* types:
* text_type: unicode in Python 2, str in Python 3
* binary_type: str in Python 2, bytes in Python 3
* string_types: basestring in Python 2, str in Python 3
* bchr(c):
Take an integer and make a 1-character byte string
* bord(c)
Take the result of indexing on a byte string and make an integer
* tobytes(s)
Take a text string, a byte string, or a sequence of characters taken
from a byte string, and make a byte string.
* raise_from()
* raise_with_traceback()
This module also defines these decorators:
* ``python_2_unicode_compatible``
* ``with_metaclass``
* ``implements_iterator``
Some of the functions in this module come from the following sources:
* Jinja2 (BSD licensed: see
https://github.com/mitsuhiko/jinja2/blob/master/LICENSE)
* Pandas compatibility module pandas.compat
* six.py by Benjamin Peterson
* Django
"""
import types
import sys
import numbers
import functools
import copy
import inspect
# Interpreter-version flags used throughout this module.
PY3 = sys.version_info[0] == 3
PY2 = sys.version_info[0] == 2
PY26 = sys.version_info[0:2] == (2, 6)
PY27 = sys.version_info[0:2] == (2, 7)
# PyPy exposes this attribute on sys; CPython does not.
PYPY = hasattr(sys, 'pypy_translation_info')
def python_2_unicode_compatible(cls):
    """
    A decorator that defines __unicode__ and __str__ methods under Python
    2. Under Python 3, this decorator is a no-op.

    Apply it to a class whose ``__str__`` returns unicode text.  On Py2
    the original ``__str__`` becomes ``__unicode__`` and a replacement
    ``__str__`` is installed that returns the UTF-8 encoded form.

    The implementation comes from django.utils.encoding.
    """
    if PY3:
        return cls
    # Py2: shuffle the original text-returning __str__ into __unicode__.
    cls.__unicode__ = cls.__str__
    cls.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return cls
def with_metaclass(meta, *bases):
    """
    Create a base class with metaclass ``meta`` (from jinja2/_compat.py,
    License: BSD).

    Use it like this::

        class Form(with_metaclass(FormType, BaseForm)):
            pass

    A dummy one-shot metaclass is used for a single level of class
    instantiation; when the real class body is executed it replaces
    itself with the actual metaclass.  ``__call__``/``__init__`` are
    reset to plain ``type`` behaviour for the temporary class.  Unlike
    six.with_metaclass, no dummy class ends up in the final MRO.
    """
    class _TemporaryMeta(meta):
        __call__ = type.__call__
        __init__ = type.__init__
        def __new__(cls, name, this_bases, d):
            if this_bases is None:
                # Building the temporary placeholder class itself.
                return type.__new__(cls, name, (), d)
            # Building the real class: hand off to the actual metaclass.
            return meta(name, bases, d)
    return _TemporaryMeta('temporary_class', None, {})
# Definitions from pandas.compat and six.py follow:
if PY3:
    def bchr(s):
        # int -> 1-byte bytes object.
        return bytes([s])
    def bstr(s):
        # Text encodes via latin-1; anything else goes through bytes().
        if isinstance(s, str):
            return bytes(s, 'latin-1')
        else:
            return bytes(s)
    def bord(s):
        # Indexing a Py3 bytes already yields an int.
        return s
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes
else:
    # Python 2
    def bchr(s):
        return chr(s)
    def bstr(s):
        return str(s)
    def bord(s):
        # Indexing a Py2 str yields a 1-char str; convert to int.
        return ord(s)
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str
###
if PY3:
    def tobytes(s):
        # Accepts bytes (returned as-is), text (latin-1 encoded), or a
        # sequence of ints/bytes (passed to the bytes constructor).
        if isinstance(s, bytes):
            return s
        else:
            if isinstance(s, str):
                return s.encode('latin-1')
            else:
                return bytes(s)
else:
    # Python 2
    def tobytes(s):
        # unicode encodes via latin-1; other inputs are assumed to be a
        # sequence of 1-char strings and are joined into a str.
        if isinstance(s, unicode):
            return s.encode('latin-1')
        else:
            return ''.join(s)
tobytes.__doc__ = """
    Encodes to latin-1 (where the first 256 chars are the same as
    ASCII.)
    """
if PY3:
    def native_str_to_bytes(s, encoding='utf-8'):
        return s.encode(encoding)
    def bytes_to_native_str(b, encoding='utf-8'):
        return b.decode(encoding)
    def text_to_native_str(t, encoding=None):
        # Py3 native strings are already text; nothing to do.
        return t
else:
    # Python 2
    def native_str_to_bytes(s, encoding=None):
        from future.types import newbytes # to avoid a circular import
        return newbytes(s)
    def bytes_to_native_str(b, encoding=None):
        # Unwrap a backported bytes object to the native Py2 str.
        return native(b)
    def text_to_native_str(t, encoding='ascii'):
        """
        Use this to create a Py2 native string when "from __future__ import
        unicode_literals" is in effect.
        """
        return unicode(t).encode(encoding)
native_str_to_bytes.__doc__ = """
    On Py3, returns an encoded string.
    On Py2, returns a newbytes type, ignoring the ``encoding`` argument.
    """
if PY3:
    # list-producing versions of the major Python iterating functions
    def lrange(*args, **kwargs):
        return list(range(*args, **kwargs))
    def lzip(*args, **kwargs):
        return list(zip(*args, **kwargs))
    def lmap(*args, **kwargs):
        return list(map(*args, **kwargs))
    def lfilter(*args, **kwargs):
        return list(filter(*args, **kwargs))
else:
    import __builtin__
    # Python 2-builtin ranges produce lists
    # so the builtins can be used directly.
    lrange = __builtin__.range
    lzip = __builtin__.zip
    lmap = __builtin__.map
    lfilter = __builtin__.filter
def isidentifier(s, dotted=False):
    '''
    A function equivalent to the str.isidentifier method on Py3
    '''
    if dotted:
        # Every dot-separated component must itself be an identifier.
        return all(isidentifier(part) for part in s.split('.'))
    if PY3:
        return s.isidentifier()
    # Py2 has no str.isidentifier; emulate it with a regex.
    import re
    pattern = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
    return bool(pattern.match(s))
def viewitems(obj, **kwargs):
    """
    Function for iterating over dictionary items with the same set-like
    behaviour on Py2.7 as on Py3.

    Passes kwargs to method."""
    # Prefer the Py2.7 view method; fall back to .items() (a view on Py3).
    method = getattr(obj, "viewitems", None)
    if method is None:
        method = obj.items
    return method(**kwargs)
def viewkeys(obj, **kwargs):
    """
    Function for iterating over dictionary keys with the same set-like
    behaviour on Py2.7 as on Py3.

    Passes kwargs to method."""
    # Prefer the Py2.7 view method; fall back to .keys() (a view on Py3).
    method = getattr(obj, "viewkeys", None)
    if method is None:
        method = obj.keys
    return method(**kwargs)
def viewvalues(obj, **kwargs):
    """
    Function for iterating over dictionary values with the same set-like
    behaviour on Py2.7 as on Py3.

    Passes kwargs to method."""
    # Prefer the Py2.7 view method; fall back to .values() (a view on Py3).
    method = getattr(obj, "viewvalues", None)
    if method is None:
        method = obj.values
    return method(**kwargs)
def iteritems(obj, **kwargs):
    """Use this only if compatibility with Python versions before 2.7 is
    required. Otherwise, prefer viewitems().
    """
    # Py2 iterator method if available; otherwise .items().
    method = getattr(obj, "iteritems", None)
    if method is None:
        method = obj.items
    return method(**kwargs)
def iterkeys(obj, **kwargs):
    """Use this only if compatibility with Python versions before 2.7 is
    required. Otherwise, prefer viewkeys().
    """
    # Py2 iterator method if available; otherwise .keys().
    method = getattr(obj, "iterkeys", None)
    if method is None:
        method = obj.keys
    return method(**kwargs)
def itervalues(obj, **kwargs):
    """Use this only if compatibility with Python versions before 2.7 is
    required. Otherwise, prefer viewvalues().
    """
    # Py2 iterator method if available; otherwise .values().
    method = getattr(obj, "itervalues", None)
    if method is None:
        method = obj.values
    return method(**kwargs)
def bind_method(cls, name, func):
    """Bind a method to class, python 2 and python 3 compatible.

    Parameters
    ----------
    cls : type
        class to receive bound method
    name : basestring
        name of method on class instance
    func : function
        function to be bound as method

    Returns
    -------
    None
    """
    if PY3:
        # Py3 has no unbound-method wrapper; plain functions bind on access.
        setattr(cls, name, func)
    else:
        # only python 2 has an issue with bound/unbound methods:
        # wrap explicitly as an unbound MethodType.
        setattr(cls, name, types.MethodType(func, None, cls))
def getexception():
    """Return the exception currently being handled, or None."""
    _, exc_value, _ = sys.exc_info()
    return exc_value
def _get_caller_globals_and_locals():
    """
    Returns the globals and locals of the calling frame.
    Is there an alternative to frame hacking here?
    """
    # stack()[2]: 0 is this frame, 1 is our caller (e.g. raise_from),
    # 2 is the user code that called it — the frame we want.
    caller_frame = inspect.stack()[2]
    myglobals = caller_frame[0].f_globals
    mylocals = caller_frame[0].f_locals
    return myglobals, mylocals
def _repr_strip(mystring):
"""
Returns the string without any initial or final quotes.
"""
r = repr(mystring)
if r.startswith("'") and r.endswith("'"):
return r[1:-1]
else:
return r
if PY3:
    def raise_from(exc, cause):
        """
        Equivalent to:
            raise EXCEPTION from CAUSE
        on Python 3. (See PEP 3134).
        """
        # The ``raise ... from ...`` syntax only exists on Py3; build it
        # as a string and exec it in the caller's namespaces.
        myglobals, mylocals = _get_caller_globals_and_locals()
        # We pass the exception and cause along with other globals
        # when we exec():
        myglobals = myglobals.copy()
        myglobals['__python_future_raise_from_exc'] = exc
        myglobals['__python_future_raise_from_cause'] = cause
        execstr = "raise __python_future_raise_from_exc from __python_future_raise_from_cause"
        exec(execstr, myglobals, mylocals)
    def raise_(tp, value=None, tb=None):
        """
        A function that matches the Python 2.x ``raise`` statement. This
        allows re-raising exceptions with the cls value and traceback on
        Python 2 and 3.
        """
        if value is not None and isinstance(tp, Exception):
            raise TypeError("instance exception may not have a separate value")
        if value is not None:
            exc = tp(value)
        else:
            exc = tp
        if exc.__traceback__ is not tb:
            raise exc.with_traceback(tb)
        raise exc
    def raise_with_traceback(exc, traceback=Ellipsis):
        # Ellipsis is the "not passed" sentinel so that an explicit
        # traceback=None can still be honoured.
        if traceback == Ellipsis:
            _, _, traceback = sys.exc_info()
        raise exc.with_traceback(traceback)
else:
    def raise_from(exc, cause):
        """
        Equivalent to:
            raise EXCEPTION from CAUSE
        on Python 3. (See PEP 3134).
        """
        # Is either arg an exception class (e.g. IndexError) rather than
        # instance (e.g. IndexError('my message here')? If so, pass the
        # name of the class undisturbed through to "raise ... from ...".
        if isinstance(exc, type) and issubclass(exc, Exception):
            e = exc()
            # exc = exc.__name__
            # execstr = "e = " + _repr_strip(exc) + "()"
            # myglobals, mylocals = _get_caller_globals_and_locals()
            # exec(execstr, myglobals, mylocals)
        else:
            e = exc
        e.__suppress_context__ = False
        if isinstance(cause, type) and issubclass(cause, Exception):
            e.__cause__ = cause()
            e.__suppress_context__ = True
        elif cause is None:
            e.__cause__ = None
            e.__suppress_context__ = True
        elif isinstance(cause, BaseException):
            e.__cause__ = cause
            e.__suppress_context__ = True
        else:
            raise TypeError("exception causes must derive from BaseException")
        e.__context__ = sys.exc_info()[1]
        raise e
    # The three-argument raise is a SyntaxError on Py3, so it must live
    # inside an exec'd string that Py3 never compiles.
    exec('''
def raise_(tp, value=None, tb=None):
    raise tp, value, tb
def raise_with_traceback(exc, traceback=Ellipsis):
    if traceback == Ellipsis:
        _, _, traceback = sys.exc_info()
    raise exc, None, traceback
'''.strip())
raise_with_traceback.__doc__ = (
    """Raise exception with existing traceback.
If traceback is not passed, uses sys.exc_info() to get traceback."""
)
# Deprecated alias for backward compatibility with ``future`` versions < 0.11:
reraise = raise_
def implements_iterator(cls):
    '''
    From jinja2/_compat.py. License: BSD.
    Use as a decorator like this::
        @implements_iterator
        class UppercasingIterator(object):
            def __init__(self, iterable):
                self._iter = iter(iterable)
            def __iter__(self):
                return self
            def __next__(self):
                return next(self._iter).upper()
    '''
    if PY3:
        # Py3 already uses __next__; nothing to do.
        return cls
    else:
        # Py2 expects ``next``: alias the Py3-style __next__ and remove it.
        cls.next = cls.__next__
        del cls.__next__
        return cls
# Return the "next" method of an iterator, portably.
# BUGFIX: the two branches were swapped.  On Python 3 the iterator
# protocol method is ``__next__`` (``.next`` does not exist on ordinary
# iterators), and on Python 2 it is ``.next`` — so the original code
# returned a non-existent attribute on both versions.
if PY3:
    get_next = lambda x: x.__next__
else:
    get_next = lambda x: x.next
def encode_filename(filename):
    """On Py2, encode a unicode filename to UTF-8 bytes; otherwise no-op."""
    if PY3:
        return filename
    else:
        if isinstance(filename, unicode):
            return filename.encode('utf-8')
        # Already a Py2 native str: leave unchanged.
        return filename
def is_new_style(cls):
"""
Python 2.7 has both new-style and old-style classes. Old-style classes can
be pesky in some circumstances, such as when using inheritance. Use this
function to test for whether a class is new-style. (Python 3 only has
new-style classes.)
"""
return hasattr(cls, '__class__') and ('__dict__' in dir(cls)
or hasattr(cls, '__slots__'))
# The native platform string and bytes types. Useful because ``str`` and
# ``bytes`` are redefined on Py2 by ``from future.builtins import *``.
# Capture them here before any such shadowing can occur.
native_str = str
native_bytes = bytes
def istext(obj):
    """
    Deprecated. Use ``isinstance(obj, str)`` after
    ``from future.builtins import str``.
    """
    # type(u'') is the text type on both Py2 (unicode) and Py3 (str).
    text_cls = type(u'')
    return isinstance(obj, text_cls)
def isbytes(obj):
    """
    Deprecated. Use ``isinstance(obj, bytes)`` after
    ``from future.builtins import bytes``.
    """
    # type(b'') is the bytes type on both Py2 (str) and Py3 (bytes).
    bytes_cls = type(b'')
    return isinstance(obj, bytes_cls)
def isnewbytes(obj):
    """
    Equivalent to the result of ``isinstance(obj, newbytes)`` were
    ``__instancecheck__`` not overridden on the newbytes subclass. In
    other words, it is REALLY a newbytes instance, not a Py2 native str
    object?
    """
    # TODO: generalize this so that it works with subclasses of newbytes
    # Import is here to avoid circular imports:
    from future.types.newbytes import newbytes
    # Exact type comparison on purpose — isinstance would be fooled by
    # newbytes' custom __instancecheck__.
    return type(obj) == newbytes
def isint(obj):
    """
    Deprecated. Tests whether an object is a Py3 ``int`` or either a Py2
    ``int`` or ``long``.

    Instead of using this function, you can use::

        >>> from future.builtins import int
        >>> isinstance(obj, int)

    or, equivalently::

        >>> from numbers import Integral
        >>> isinstance(obj, Integral)
    """
    return isinstance(obj, numbers.Integral)
def native(obj):
    """
    On Py3, this is a no-op: native(obj) -> obj.

    On Py2, returns the corresponding native Py2 object (unicode, bytes,
    long, ...) for a backported object from ``future.builtins``.  Such
    objects advertise the conversion via a ``__native__`` method; anything
    without one is returned unchanged.

    Examples (Py2)::

        >>> from builtins import str, bytes, int
        >>> type(native(str(u'ABC')))
        unicode
        >>> type(native(bytes(b'ABC')))
        bytes
        >>> type(native(int(10**20)))
        long
    """
    if not hasattr(obj, '__native__'):
        # Already a native object.
        return obj
    return obj.__native__()
# Implementation of exec_ is from ``six``:
if PY3:
    import builtins
    # Fetch via getattr because ``exec`` is a keyword on Py2 and this
    # module must stay parseable there.
    exec_ = getattr(builtins, "exec")
else:
    def exec_(code, globs=None, locs=None):
        """Execute code in a namespace."""
        if globs is None:
            # Default to the caller's namespaces via frame introspection.
            frame = sys._getframe(1)
            globs = frame.f_globals
            if locs is None:
                locs = frame.f_locals
            del frame
        elif locs is None:
            locs = globs
        # The statement form is Py2-only syntax, hidden inside a string.
        exec("""exec code in globs, locs""")
# Defined here for backward compatibility:
def old_div(a, b):
"""
DEPRECATED: import ``old_div`` from ``past.utils`` instead.
Equivalent to ``a / b`` on Python 2 without ``from __future__ import
division``.
TODO: generalize this to other objects (like arrays etc.)
"""
if isinstance(a, numbers.Integral) and isinstance(b, numbers.Integral):
return a // b
else:
return a / b
def as_native_str(encoding='utf-8'):
    '''
    A decorator to turn a function or method call that returns text, i.e.
    unicode, into one that returns a native platform str.

    Use it as a decorator like this::

        from __future__ import unicode_literals

        class MyClass(object):
            @as_native_str(encoding='ascii')
            def __repr__(self):
                return next(self._iter).upper()
    '''
    if PY3:
        # Py3 native str is already text: decorate with identity.
        return lambda f: f
    def encoder(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            # Encode the unicode result to a Py2 native str.
            return f(*args, **kwargs).encode(encoding=encoding)
        return wrapper
    return encoder
# listvalues and listitems definitions from Nick Coghlan's (withdrawn)
# PEP 496:
try:
    # Feature-detect Py2 dicts (which have iteritems).
    dict.iteritems
except AttributeError:
    # Python 3: .values()/.items() return views, so wrap in list().
    def listvalues(d):
        return list(d.values())
    def listitems(d):
        return list(d.items())
else:
    # Python 2: .values()/.items() already return lists.
    def listvalues(d):
        return d.values()
    def listitems(d):
        return d.items()
if PY3:
    def ensure_new_type(obj):
        # Py3 builtins are already the "new" types.
        return obj
else:
    def ensure_new_type(obj):
        # Wrap a native Py2 object in the corresponding backported
        # future.types class; leave already-wrapped objects alone.
        from future.types.newbytes import newbytes
        from future.types.newstr import newstr
        from future.types.newint import newint
        from future.types.newdict import newdict
        native_type = type(native(obj))
        # Upcast only if the type is already a native (non-future) type
        if issubclass(native_type, type(obj)):
            # Upcast
            if native_type == str: # i.e. Py2 8-bit str
                return newbytes(obj)
            elif native_type == unicode:
                return newstr(obj)
            elif native_type == int:
                return newint(obj)
            elif native_type == long:
                return newint(obj)
            elif native_type == dict:
                return newdict(obj)
            else:
                # No backported equivalent; return unchanged.
                return obj
        else:
            # Already a new type
            assert type(obj) in [newbytes, newstr]
            return obj
# Explicit public API of this compatibility module.
__all__ = ['PY2', 'PY26', 'PY3', 'PYPY',
           'as_native_str', 'bind_method', 'bord', 'bstr',
           'bytes_to_native_str', 'encode_filename', 'ensure_new_type',
           'exec_', 'get_next', 'getexception', 'implements_iterator',
           'is_new_style', 'isbytes', 'isidentifier', 'isint',
           'isnewbytes', 'istext', 'iteritems', 'iterkeys', 'itervalues',
           'lfilter', 'listitems', 'listvalues', 'lmap', 'lrange',
           'lzip', 'native', 'native_bytes', 'native_str',
           'native_str_to_bytes', 'old_div',
           'python_2_unicode_compatible', 'raise_',
           'raise_with_traceback', 'reraise', 'text_to_native_str',
           'tobytes', 'viewitems', 'viewkeys', 'viewvalues',
           'with_metaclass'
           ]
| gpl-3.0 |
jonathonwalz/ansible | lib/ansible/modules/cloud/amazon/ec2_vpc_endpoint_facts.py | 23 | 6518 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Standard Ansible module metadata block (consumed by ansible-doc/tooling).
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
module: ec2_vpc_endpoint_facts
short_description: Retrieves AWS VPC endpoints details using AWS methods.
description:
- Gets various details related to AWS VPC Endpoints
version_added: "2.4"
requirements: [ boto3 ]
options:
query:
description:
- Specifies the query action to take. Services returns the supported
AWS services that can be specified when creating an endpoint.
required: True
choices:
- services
- endpoints
vpc_endpoint_ids:
description:
- Get details of specific endpoint IDs
- Provide this value as a list
required: false
default: None
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcEndpoints.html)
for possible filters.
required: false
default: None
author: Karen Cheng(@Etherdaemon)
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Simple example of listing all support AWS services for VPC endpoints
- name: List supported AWS endpoint services
ec2_vpc_endpoint_facts:
query: services
region: ap-southeast-2
register: supported_endpoint_services
- name: Get all endpoints in ap-southeast-2 region
ec2_vpc_endpoint_facts:
query: endpoints
region: ap-southeast-2
register: existing_endpoints
- name: Get all endpoints with specific filters
ec2_vpc_endpoint_facts:
query: endpoints
region: ap-southeast-2
filters:
vpc-id:
- vpc-12345678
- vpc-87654321
vpc-endpoint-state:
- available
- pending
register: existing_endpoints
- name: Get details on specific endpoint
ec2_vpc_endpoint_facts:
query: endpoints
region: ap-southeast-2
vpc_endpoint_ids:
- vpce-12345678
register: endpoint_details
'''
RETURN = '''
service_names:
description: AWS VPC endpoint service names
returned: I(query) is C(services)
type: list
sample:
service_names:
- com.amazonaws.ap-southeast-2.s3
vpc_endpoints:
description:
- A list of endpoints that match the query. Each endpoint has the keys creation_timestamp,
policy_document, route_table_ids, service_name, state, vpc_endpoint_id, vpc_id.
returned: I(query) is C(endpoints)
type: list
sample:
vpc_endpoints:
- creation_timestamp: "2017-02-16T11:06:48+00:00"
policy_document: >
"{\"Version\":\"2012-10-17\",\"Id\":\"Policy1450910922815\",
\"Statement\":[{\"Sid\":\"Stmt1450910920641\",\"Effect\":\"Allow\",
\"Principal\":\"*\",\"Action\":\"s3:*\",\"Resource\":[\"arn:aws:s3:::*/*\",\"arn:aws:s3:::*\"]}]}"
route_table_ids:
- rtb-abcd1234
service_name: "com.amazonaws.ap-southeast-2.s3"
state: "available"
vpc_endpoint_id: "vpce-abbad0d0"
vpc_id: "vpc-1111ffff"
'''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, boto3_conn, get_aws_connection_info
from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, HAS_BOTO3
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
# botocore may be absent; main() reports that via the HAS_BOTO3 flag imported
# above instead of failing here at import time.
try:
    import botocore
except ImportError:
    pass  # will be picked up from imported HAS_BOTO3
def date_handler(obj):
    """json.dumps ``default`` hook: render datetime-like objects as ISO-8601."""
    if hasattr(obj, 'isoformat'):
        return obj.isoformat()
    return obj
def get_supported_services(client, module):
    """Return every VPC-endpoint-capable service name, following pagination.

    ``module`` is accepted for signature parity with get_endpoints but unused.
    """
    service_names = []
    params = {}
    while True:
        page = client.describe_vpc_endpoint_services(**params)
        service_names.extend(page['ServiceNames'])
        if 'NextToken' not in page:
            break
        # Keep requesting pages until AWS stops returning a token.
        params['NextToken'] = page['NextToken']
    return dict(service_names=service_names)
def get_endpoints(client, module):
    """Return all VPC endpoints matching the module's filters / endpoint ids.

    Follows NextToken pagination, then converts the results to snake_case
    dicts with datetime values rendered as ISO-8601 strings.
    """
    results = list()
    params = dict()
    params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
    if module.params.get('vpc_endpoint_ids'):
        params['VpcEndpointIds'] = module.params.get('vpc_endpoint_ids')
    while True:
        response = client.describe_vpc_endpoints(**params)
        results.extend(response['VpcEndpoints'])
        if 'NextToken' in response:
            params['NextToken'] = response['NextToken']
        else:
            break
    try:
        # Round-trip through JSON so datetime objects become ISO-8601 strings.
        results = json.loads(json.dumps(results, default=date_handler))
    except Exception as e:
        # str(e), not e.message: BaseException.message does not exist on
        # Python 3, so the old code raised AttributeError instead of reporting.
        module.fail_json(msg=str(e))
    return dict(vpc_endpoints=[camel_dict_to_snake_dict(result) for result in results])
def main():
    """Module entry point: validate arguments, build a boto3 client and
    dispatch to the requested query (``services`` or ``endpoints``)."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            query=dict(choices=['services', 'endpoints'], required=True),
            filters=dict(default={}, type='dict'),
            vpc_endpoint_ids=dict(type='list'),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    # Validate Requirements
    if not HAS_BOTO3:
        module.fail_json(msg='botocore and boto3 are required.')
    try:
        region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
        if not region:
            module.fail_json(msg="region must be specified")
        connection = boto3_conn(module, conn_type='client', resource='ec2',
                                region=region, endpoint=ec2_url, **aws_connect_params)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg=str(e))
    # Map the query argument onto its handler function.
    dispatch = {
        'services': get_supported_services,
        'endpoints': get_endpoints,
    }
    results = dispatch[module.params.get('query')](connection, module)
    module.exit_json(**results)
if __name__ == '__main__':
    main()
| gpl-3.0 |
jessesnyder/beatbox | src/beatbox/tests/test_benchmark.py | 6 | 3661 | from types import DictType, StringTypes, IntType, ListType, TupleType
import gc
import unittest
import datetime
from time import time
import sfconfig
import beatbox
from beatbox import SoapFaultError
# Number of timed repetitions each benchmarked test performs.
BENCHMARK_REPS = 1
def benchmark(func):
    """Decorator: run *func* BENCHMARK_REPS times and print the elapsed time.

    Garbage collection is disabled around the timed loop so collector pauses
    do not distort the measurement, and is re-enabled even if *func* raises
    (the original left GC off on failure). functools.wraps preserves the
    benchmarked test's name and docstring for unittest reporting.
    """
    import functools

    @functools.wraps(func)
    def benchmarked_func(self):
        # temporarily disable garbage collection
        gc.disable()
        t0 = time()
        try:
            for _ in range(BENCHMARK_REPS):
                func(self)
        finally:
            t1 = time()
            gc.enable()
        elapsed = t1 - t0
        print("\n%s: %s\n" % (func.__name__, elapsed))
    return benchmarked_func
class TestUtils(unittest.TestCase):
    """Benchmark-style integration tests against a live Salesforce org.

    setUp logs in with the credentials from sfconfig; every record created
    during a test is queued in self._todelete and removed in tearDown.
    """
    def setUp(self):
        # Keep both an attribute and a local alias for the client.
        self.svc = svc = beatbox.PythonClient(serverUrl='https://www.salesforce.com/services/Soap/u/15.0')
        svc.login(sfconfig.USERNAME, sfconfig.PASSWORD)
        self._todelete = list()
    def tearDown(self):
        """Delete queued records in batches of at most 200 ids per call."""
        # presumably 200 is the service's per-call delete limit -- verify
        svc = self.svc
        ids = self._todelete
        if ids:
            while len(ids) > 200:
                svc.delete(ids[:200])
                ids = ids[200:]
            if ids:
                svc.delete(ids)
        self._todelete = list()
    @benchmark
    def testDescribeSObjects(self):
        """Describe one type, then up to 100 types, from describeGlobal()."""
        svc = self.svc
        globalres = svc.describeGlobal()
        types = globalres['types']
        res = svc.describeSObjects(types[0])
        self.assertEqual(type(res), ListType)
        self.assertEqual(len(res), 1)
        res = svc.describeSObjects(types[:100])
        self.assertEqual(len(types[:100]), len(res))
    @benchmark
    def testCreate(self):
        """Create a Contact, then retrieve it and compare every field."""
        svc = self.svc
        data = dict(type='Contact',
            LastName='Doe',
            FirstName='John',
            Phone='123-456-7890',
            Email='john@doe.com',
            Birthdate = datetime.date(1970, 1, 4)
            )
        res = svc.create([data])
        self.failUnless(type(res) in (ListType, TupleType))
        self.failUnless(len(res) == 1)
        self.failUnless(res[0]['success'])
        id = res[0]['id']
        self._todelete.append(id)
        contacts = svc.retrieve('LastName, FirstName, Phone, Email, Birthdate',
            'Contact', [id])
        self.assertEqual(len(contacts), 1)
        contact = contacts[0]
        for k in ['LastName', 'FirstName', 'Phone', 'Email', 'Birthdate']:
            self.assertEqual(
                data[k], contact[k])
    @benchmark
    def testQuery(self):
        """Create two Contacts and query them back by LastName/FirstName."""
        svc = self.svc
        data = dict(type='Contact',
            LastName='Doe',
            FirstName='John',
            Phone='123-456-7890',
            Email='john@doe.com',
            Birthdate = datetime.date(1970, 1, 4)
            )
        res = svc.create([data])
        self._todelete.append(res[0]['id'])
        data2 = dict(type='Contact',
            LastName='Doe',
            FirstName='Jane',
            Phone='123-456-7890',
            Email='jane@doe.com',
            Birthdate = datetime.date(1972, 10, 15)
            )
        res = svc.create([data2])
        janeid = res[0]['id']
        self._todelete.append(janeid)
        res = svc.query('LastName, FirstName, Phone, Email, Birthdate',
            'Contact', "LastName = 'Doe'")
        self.assertEqual(res['size'], 2)
        res = svc.query('Id, LastName, FirstName, Phone, Email, Birthdate',
            'Contact', "LastName = 'Doe' and FirstName = 'Jane'")
        self.assertEqual(res['size'], 1)
        self.assertEqual(res['records'][0]['Id'], janeid)
        # Explicit cleanup inside the timed body; the framework's own
        # tearDown call afterwards is a no-op because _todelete is reset.
        self.tearDown()
def test_suite():
    """Collect every TestUtils test into a single suite."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(TestUtils))
    return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite') | gpl-2.0 |
agiliq/nginx-python-buildpack | vendor/pip-1.5.4/pip/_vendor/requests/packages/charade/sjisprober.py | 1182 | 3734 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
    """Prober that scores how likely a byte stream is Shift_JIS Japanese.

    Combines a coding state machine (structural validity of byte sequences)
    with a character-distribution analyser and a context analyser; the
    reported confidence is the maximum of the two analysers.
    """
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(SJISSMModel)
        self._mDistributionAnalyzer = SJISDistributionAnalysis()
        self._mContextAnalyzer = SJISContextAnalysis()
        self.reset()
    def reset(self):
        """Reset the base prober state and the context analyser."""
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()
    def get_charset_name(self):
        return "SHIFT_JIS"
    def feed(self, aBuf):
        """Feed a chunk of bytes; return the detection state afterwards."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                # Byte sequence is structurally illegal for Shift_JIS.
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A character just completed; charLen is its byte length
                # according to the state machine.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # Character straddles the previous chunk: feed from the
                    # 2-byte carry buffer (_mLastChar) instead of aBuf.
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
                                                charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    # NOTE(review): the context analyser is fed a window
                    # shifted one byte past the distribution analyser's --
                    # assumed intentional (trail-byte context); verify against
                    # upstream chardet before changing.
                    self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
                                                     - charLen], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)
        # Carry the final byte over in case a character spans chunks.
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt
        return self.get_state()
    def get_confidence(self):
        """Return the stronger of the context and distribution confidences."""
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
| mit |
def main(request, response):
    """wptserve handler: send a CSP ``child-src`` policy and the test page.

    The policy only allows http://www.w3c.com as a frame source, so the
    cross-origin image the page loads into its iframe must be blocked
    (the page shows no red when the browser behaves correctly).
    """
    # Standard header plus the two legacy vendor-prefixed spellings so older
    # engines also apply the policy.
    response.headers.set(
        "Content-Security-Policy",
        "child-src http://www.w3c.com")
    response.headers.set(
        "X-Content-Security-Policy",
        "child-src http://www.w3c.com")
    response.headers.set("X-WebKit-CSP", "child-src http://www.w3c.com")
    return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
  of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
  may be used to endorse or promote products derived from this work without
  specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
        Xu, Jianfeng <jianfengx.xu@intel.com>
-->
<html>
  <head>
    <title>CSP Test: csp_child-src_cross-origin_blocked</title>
    <link rel="author" title="Intel" href="http://www.intel.com"/>
    <link rel="help" href="http://w3c.github.io/webappsec/specs/content-security-policy/csp-specification.dev.html#child-src"/>
    <meta name="flags" content=""/>
    <meta name="assert" content="child-src http://www.w3c.com"/>
    <meta charset="utf-8"/>
    <script src="../resources/server.js?pipe=sub"></script>
  </head>
  <body>
    <p>Test passes if there is <strong>no red</strong>.</p>
    <iframe id="test" frameborder="no" border="0"></iframe>
    <script>
        document.getElementById("test").src = "http://" + __SERVER__NAME + ":" + __CORS__PORT + "/tests/csp/support/red-100x100.png";
    </script>
  </body>
</html> """
| bsd-3-clause |
musically-ut/statsmodels | statsmodels/examples/tsa/ex_arma_all.py | 34 | 1982 |
from __future__ import print_function
import numpy as np
from numpy.testing import assert_almost_equal
import matplotlib.pyplot as plt
import statsmodels.sandbox.tsa.fftarma as fa
from statsmodels.tsa.descriptivestats import TsaDescriptive
from statsmodels.tsa.arma_mle import Arma
# --- Generate an ARMA(1,1) sample and fit it three ways: descriptive
# --- wrapper, least squares, and MLE; the fits should roughly agree.
x = fa.ArmaFft([1, -0.5], [1., 0.4], 40).generate_sample(size=200, burnin=1000)
d = TsaDescriptive(x)
d.plot4()
#d.fit(order=(1,1))
d.fit((1,1), trend='nc')
print(d.res.params)
modc = Arma(x)
resls = modc.fit(order=(1,1))
print(resls[0])
rescm = modc.fit_mle(order=(1,1), start_params=[-0.4,0.4, 1.])
print(rescm.params)
#decimal 1 corresponds to threshold of 5% difference
assert_almost_equal(resls[0] / d.res.params, 1, decimal=1)
assert_almost_equal(rescm.params[:-1] / d.res.params, 1, decimal=1)
#copied to tsa.tests
# Plot the sample against the in-sample predictions and the residuals.
plt.figure()
plt.plot(x, 'b-o')
plt.plot(modc.predicted(), 'r-')
plt.figure()
plt.plot(modc.error_estimate)
#plt.show()
# --- Same series fitted with the t-distributed ARMA model.
from statsmodels.miscmodels.tmodel import TArma
modct = TArma(x)
# NOTE(review): this refits `modc`, not the freshly created `modct` --
# looks like a typo for modct.fit; verify intent before changing.
reslst = modc.fit(order=(1,1))
print(reslst[0])
rescmt = modct.fit_mle(order=(1,1), start_params=[-0.4,0.4, 10, 1.],maxiter=500,
                       maxfun=500)
print(rescmt.params)
# --- Kalman-filter based ARMA fit for comparison.
from statsmodels.tsa.arima_model import ARMA
mkf = ARMA(x)
##rkf = mkf.fit((1,1))
##rkf.params
rkf = mkf.fit((1,1), trend='nc')
print(rkf.params)
# --- A second experiment: ARMA(2,2) data fitted by LS and MLE, plus
# --- out-of-sample forecasts.
from statsmodels.tsa.arima_process import arma_generate_sample
np.random.seed(12345)
y_arma22 = arma_generate_sample([1.,-.85,.35, -0.1],[1,.25,-.7], nsample=1000)
##arma22 = ARMA(y_arma22)
##res22 = arma22.fit(trend = 'nc', order=(2,2))
##print 'kf ',res22.params
##res22css = arma22.fit(method='css',trend = 'nc', order=(2,2))
##print 'css', res22css.params
mod22 = Arma(y_arma22)
resls22 = mod22.fit(order=(2,2))
print('ls ', resls22[0])
resmle22 = mod22.fit_mle(order=(2,2), maxfun=2000)
print('mle', resmle22.params)
f = mod22.forecast()
f3 = mod22.forecast3(start=900)[-20:]
print(y_arma22[-10:])
print(f[-20:])
print(f3[-109:-90])
plt.show() | bsd-3-clause |
TheTacoScott/GoAtThrottleUp | ServerRelay/cherrypy/test/modpy.py | 12 | 5027 | """Wrapper for mod_python, for use as a CherryPy HTTP server when testing.
To autostart modpython, the "apache" executable or script must be
on your system path, or you must override the global APACHE_PATH.
On some platforms, "apache" may be called "apachectl" or "apache2ctl"--
create a symlink to them if needed.
If you wish to test the WSGI interface instead of our _cpmodpy interface,
you also need the 'modpython_gateway' module at:
http://projects.amor.org/misc/wiki/ModPythonGateway
KNOWN BUGS
==========
1. Apache processes Range headers automatically; CherryPy's truncated
output is then truncated again by Apache. See test_core.testRanges.
This was worked around in http://www.cherrypy.org/changeset/1319.
2. Apache does not allow custom HTTP methods like CONNECT as per the spec.
See test_core.testHTTPMethods.
3. Max request header and body settings do not work with Apache.
4. Apache replaces status "reason phrases" automatically. For example,
CherryPy may set "304 Not modified" but Apache will write out
"304 Not Modified" (capital "M").
5. Apache does not allow custom error codes as per the spec.
6. Apache (or perhaps modpython, or modpython_gateway) unquotes %xx in the
Request-URI too early.
7. mod_python will not read request bodies which use the "chunked"
transfer-coding (it passes REQUEST_CHUNKED_ERROR to ap_setup_client_block
instead of REQUEST_CHUNKED_DECHUNK, see Apache2's http_protocol.c and
mod_python's requestobject.c).
8. Apache will output a "Content-Length: 0" response header even if there's
no response entity body. This isn't really a bug; it just differs from
the CherryPy default.
"""
import os
import re
import subprocess
import time

from cherrypy.test import helper

curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
def read_process(cmd, args=""):
    """Run ``cmd args`` through the shell and return combined stdout/stderr.

    Raises IOError when the first output line indicates *cmd* was not found
    on the system path. Replaces the deprecated, Python-2-only os.popen4
    with subprocess while keeping the same contract. shell=True is retained
    deliberately: this test harness builds commands from trusted constants.
    """
    pipe = subprocess.Popen("%s %s" % (cmd, args), shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            universal_newlines=True)
    try:
        firstline = pipe.stdout.readline()
        if (re.search(r"(not recognized|No such file|not found)", firstline,
                      re.IGNORECASE)):
            raise IOError('%s must be on your system path.' % cmd)
        output = firstline + pipe.stdout.read()
    finally:
        pipe.stdout.close()
        pipe.wait()
    return output
# Name of the apache control executable (must be on the system path, see the
# module docstring) and the config file generated for each test run.
APACHE_PATH = "httpd"
CONF_PATH = "test_mp.conf"
# Template for serving through modpython_gateway (WSGI interface); the
# %(...)s placeholders are filled in by ModPythonSupervisor.start().
conf_modpython_gateway = """
# Apache2 server conf file for testing CherryPy with modpython_gateway.
ServerName 127.0.0.1
DocumentRoot "/"
Listen %(port)s
LoadModule python_module modules/mod_python.so
SetHandler python-program
PythonFixupHandler cherrypy.test.modpy::wsgisetup
PythonOption testmod %(modulename)s
PythonHandler modpython_gateway::handler
PythonOption wsgi.application cherrypy::tree
PythonOption socket_host %(host)s
PythonDebug On
"""
# Template for serving through CherryPy's native _cpmodpy handler instead.
conf_cpmodpy = """
# Apache2 server conf file for testing CherryPy with _cpmodpy.
ServerName 127.0.0.1
DocumentRoot "/"
Listen %(port)s
LoadModule python_module modules/mod_python.so
SetHandler python-program
PythonFixupHandler cherrypy.test.modpy::cpmodpysetup
PythonHandler cherrypy._cpmodpy::handler
PythonOption cherrypy.setup cherrypy.test.%(modulename)s::setup_server
PythonOption socket_host %(host)s
PythonDebug On
"""
class ModPythonSupervisor(helper.Supervisor):
    """Test supervisor that starts/stops an Apache+mod_python server."""

    using_apache = True
    using_wsgi = False
    # Config template (conf_modpython_gateway or conf_cpmodpy); set by tests.
    template = None

    def __str__(self):
        return "ModPython Server on %s:%s" % (self.host, self.port)

    def start(self, modulename):
        """Render the config template to CONF_PATH and launch apache."""
        conf_path = CONF_PATH
        if not os.path.isabs(conf_path):
            conf_path = os.path.join(curdir, conf_path)
        conf_file = open(conf_path, 'wb')
        try:
            conf_file.write(self.template %
                            {'port': self.port, 'modulename': modulename,
                             'host': self.host})
        finally:
            conf_file.close()
        output = read_process(APACHE_PATH, "-k start -f %s" % conf_path)
        if output:
            print(output)

    def stop(self):
        """Gracefully shutdown a server that is serving forever."""
        read_process(APACHE_PATH, "-k stop")
# Process-wide flag: each Apache child must configure and start CherryPy
# exactly once, on the first request it serves.
loaded = False
def wsgisetup(req):
    """mod_python fixup handler for the modpython_gateway (WSGI) setup.

    On first call, configures CherryPy, imports the test module named by
    the ``testmod`` PythonOption and runs its setup_server(), then starts
    the engine without CherryPy's own HTTP server (Apache handles sockets).
    """
    global loaded
    if not loaded:
        loaded = True
        options = req.get_options()
        import cherrypy
        cherrypy.config.update({
            "log.error_file": os.path.join(curdir, "test.log"),
            "environment": "test_suite",
            "server.socket_host": options['socket_host'],
        })
        modname = options['testmod']
        # Import the configured test module and let it mount its app(s).
        mod = __import__(modname, globals(), locals(), [''])
        mod.setup_server()
        # Apache serves the requests, so CherryPy's HTTP server must not run.
        cherrypy.server.unsubscribe()
        cherrypy.engine.start()
    from mod_python import apache
    return apache.OK
def cpmodpysetup(req):
    """mod_python fixup handler for the _cpmodpy setup.

    Only applies CherryPy configuration on first call; unlike wsgisetup,
    app mounting is done by the _cpmodpy handler via the ``cherrypy.setup``
    PythonOption, so nothing is imported or started here.
    """
    global loaded
    if not loaded:
        loaded = True
        options = req.get_options()
        import cherrypy
        cherrypy.config.update({
            "log.error_file": os.path.join(curdir, "test.log"),
            "environment": "test_suite",
            "server.socket_host": options['socket_host'],
        })
    from mod_python import apache
    return apache.OK
| mit |
Nexenta/s3-tests | virtualenv/lib/python2.7/site-packages/boto/ec2/image.py | 92 | 16222 | # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.ec2.ec2object import EC2Object, TaggedEC2Object
from boto.ec2.blockdevicemapping import BlockDeviceMapping
class ProductCodes(list):
    """SAX helper list that accumulates <productCode> element values."""

    def startElement(self, name, attrs, connection):
        # Product code elements carry no nested structure to descend into.
        pass

    def endElement(self, name, value, connection):
        if name != 'productCode':
            return
        self.append(value)
class BillingProducts(list):
    """SAX helper list that accumulates <billingProduct> element values."""

    def startElement(self, name, attrs, connection):
        # Billing product elements carry no nested structure.
        pass

    def endElement(self, name, value, connection):
        if name != 'billingProduct':
            return
        self.append(value)
class Image(TaggedEC2Object):
    """
    Represents an EC2 Image (AMI/AKI/ARI), populated by SAX parsing of the
    DescribeImages response and offering launch/attribute helpers.
    """
    def __init__(self, connection=None):
        super(Image, self).__init__(connection)
        self.id = None
        self.location = None
        self.state = None
        self.ownerId = None # for backwards compatibility
        self.owner_id = None
        self.owner_alias = None
        self.is_public = False
        self.architecture = None
        self.platform = None
        self.type = None
        self.kernel_id = None
        self.ramdisk_id = None
        self.name = None
        self.description = None
        self.product_codes = ProductCodes()
        self.billing_products = BillingProducts()
        self.block_device_mapping = None
        self.root_device_type = None
        self.root_device_name = None
        self.virtualization_type = None
        self.hypervisor = None
        self.instance_lifecycle = None
        self.sriov_net_support = None
    def __repr__(self):
        return 'Image:%s' % self.id
    def startElement(self, name, attrs, connection):
        """SAX hook: delegate nested list/mapping elements to sub-parsers."""
        retval = super(Image, self).startElement(name, attrs, connection)
        if retval is not None:
            return retval
        if name == 'blockDeviceMapping':
            self.block_device_mapping = BlockDeviceMapping()
            return self.block_device_mapping
        elif name == 'productCodes':
            return self.product_codes
        elif name == 'billingProducts':
            return self.billing_products
        else:
            return None
    def endElement(self, name, value, connection):
        """SAX hook: map each response element onto the matching attribute."""
        if name == 'imageId':
            self.id = value
        elif name == 'imageLocation':
            self.location = value
        elif name == 'imageState':
            self.state = value
        elif name == 'imageOwnerId':
            self.ownerId = value # for backwards compatibility
            self.owner_id = value
        elif name == 'isPublic':
            if value == 'false':
                self.is_public = False
            elif value == 'true':
                self.is_public = True
            else:
                # Anything but the literal strings true/false is a protocol
                # violation, so fail loudly rather than guess.
                raise Exception(
                    'Unexpected value of isPublic %s for image %s' % (
                        value,
                        self.id
                    )
                )
        elif name == 'architecture':
            self.architecture = value
        elif name == 'imageType':
            self.type = value
        elif name == 'kernelId':
            self.kernel_id = value
        elif name == 'ramdiskId':
            self.ramdisk_id = value
        elif name == 'imageOwnerAlias':
            self.owner_alias = value
        elif name == 'platform':
            self.platform = value
        elif name == 'name':
            self.name = value
        elif name == 'description':
            self.description = value
        elif name == 'rootDeviceType':
            self.root_device_type = value
        elif name == 'rootDeviceName':
            self.root_device_name = value
        elif name == 'virtualizationType':
            self.virtualization_type = value
        elif name == 'hypervisor':
            self.hypervisor = value
        elif name == 'instanceLifecycle':
            self.instance_lifecycle = value
        elif name == 'sriovNetSupport':
            self.sriov_net_support = value
        else:
            # Unknown elements become dynamic attributes rather than errors.
            setattr(self, name, value)
    def _update(self, updated):
        """Copy all attributes from a freshly-fetched Image onto self."""
        self.__dict__.update(updated.__dict__)
    def update(self, validate=False, dry_run=False):
        """
        Update the image's state information by making a call to fetch
        the current image attributes from the service.
        :type validate: bool
        :param validate: By default, if EC2 returns no data about the
                         image the update method returns quietly. If
                         the validate param is True, however, it will
                         raise a ValueError exception if no data is
                         returned from EC2.
        """
        rs = self.connection.get_all_images([self.id], dry_run=dry_run)
        if len(rs) > 0:
            img = rs[0]
            if img.id == self.id:
                self._update(img)
        elif validate:
            raise ValueError('%s is not a valid Image ID' % self.id)
        return self.state
    def run(self, min_count=1, max_count=1, key_name=None,
            security_groups=None, user_data=None,
            addressing_type=None, instance_type='m1.small', placement=None,
            kernel_id=None, ramdisk_id=None,
            monitoring_enabled=False, subnet_id=None,
            block_device_map=None,
            disable_api_termination=False,
            instance_initiated_shutdown_behavior=None,
            private_ip_address=None,
            placement_group=None, security_group_ids=None,
            additional_info=None, instance_profile_name=None,
            instance_profile_arn=None, tenancy=None, dry_run=False):
        """
        Runs this instance.
        :type min_count: int
        :param min_count: The minimum number of instances to start
        :type max_count: int
        :param max_count: The maximum number of instances to start
        :type key_name: string
        :param key_name: The name of the key pair with which to
            launch instances.
        :type security_groups: list of strings
        :param security_groups: The names of the security groups with which to
            associate instances.
        :type user_data: string
        :param user_data: The Base64-encoded MIME user data to be made
            available to the instance(s) in this reservation.
        :type instance_type: string
        :param instance_type: The type of instance to run:
            * t1.micro
            * m1.small
            * m1.medium
            * m1.large
            * m1.xlarge
            * m3.medium
            * m3.large
            * m3.xlarge
            * m3.2xlarge
            * c1.medium
            * c1.xlarge
            * m2.xlarge
            * m2.2xlarge
            * m2.4xlarge
            * cr1.8xlarge
            * hi1.4xlarge
            * hs1.8xlarge
            * cc1.4xlarge
            * cg1.4xlarge
            * cc2.8xlarge
            * g2.2xlarge
            * c3.large
            * c3.xlarge
            * c3.2xlarge
            * c3.4xlarge
            * c3.8xlarge
            * i2.xlarge
            * i2.2xlarge
            * i2.4xlarge
            * i2.8xlarge
            * t2.micro
            * t2.small
            * t2.medium
        :type placement: string
        :param placement: The Availability Zone to launch the instance into.
        :type kernel_id: string
        :param kernel_id: The ID of the kernel with which to launch the
            instances.
        :type ramdisk_id: string
        :param ramdisk_id: The ID of the RAM disk with which to launch the
            instances.
        :type monitoring_enabled: bool
        :param monitoring_enabled: Enable CloudWatch monitoring on
            the instance.
        :type subnet_id: string
        :param subnet_id: The subnet ID within which to launch the instances
            for VPC.
        :type private_ip_address: string
        :param private_ip_address: If you're using VPC, you can
            optionally use this parameter to assign the instance a
            specific available IP address from the subnet (e.g.,
            10.0.0.25).
        :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
        :param block_device_map: A BlockDeviceMapping data structure
            describing the EBS volumes associated with the Image.
        :type disable_api_termination: bool
        :param disable_api_termination: If True, the instances will be locked
            and will not be able to be terminated via the API.
        :type instance_initiated_shutdown_behavior: string
        :param instance_initiated_shutdown_behavior: Specifies whether the
            instance stops or terminates on instance-initiated shutdown.
            Valid values are:
            * stop
            * terminate
        :type placement_group: string
        :param placement_group: If specified, this is the name of the placement
            group in which the instance(s) will be launched.
        :type additional_info: string
        :param additional_info: Specifies additional information to make
            available to the instance(s).
        :type security_group_ids: list of strings
        :param security_group_ids: The ID of the VPC security groups with
            which to associate instances.
        :type instance_profile_name: string
        :param instance_profile_name: The name of
            the IAM Instance Profile (IIP) to associate with the instances.
        :type instance_profile_arn: string
        :param instance_profile_arn: The Amazon resource name (ARN) of
            the IAM Instance Profile (IIP) to associate with the instances.
        :type tenancy: string
        :param tenancy: The tenancy of the instance you want to
            launch. An instance with a tenancy of 'dedicated' runs on
            single-tenant hardware and can only be launched into a
            VPC. Valid values are:"default" or "dedicated".
            NOTE: To use dedicated tenancy you MUST specify a VPC
            subnet-ID as well.
        :rtype: Reservation
        :return: The :class:`boto.ec2.instance.Reservation` associated with
                 the request for machines
        """
        return self.connection.run_instances(self.id, min_count, max_count,
                                             key_name, security_groups,
                                             user_data, addressing_type,
                                             instance_type, placement,
                                             kernel_id, ramdisk_id,
                                             monitoring_enabled, subnet_id,
                                             block_device_map, disable_api_termination,
                                             instance_initiated_shutdown_behavior,
                                             private_ip_address, placement_group,
                                             security_group_ids=security_group_ids,
                                             additional_info=additional_info,
                                             instance_profile_name=instance_profile_name,
                                             instance_profile_arn=instance_profile_arn,
                                             tenancy=tenancy, dry_run=dry_run)
    def deregister(self, delete_snapshot=False, dry_run=False):
        """Deregister this image, optionally deleting its backing snapshot."""
        return self.connection.deregister_image(
            self.id,
            delete_snapshot,
            dry_run=dry_run
        )
    def get_launch_permissions(self, dry_run=False):
        """Return this image's launchPermission attribute as a dict."""
        img_attrs = self.connection.get_image_attribute(
            self.id,
            'launchPermission',
            dry_run=dry_run
        )
        return img_attrs.attrs
    def set_launch_permissions(self, user_ids=None, group_names=None,
                               dry_run=False):
        """Grant launch permission to the given users and/or groups."""
        return self.connection.modify_image_attribute(self.id,
                                                      'launchPermission',
                                                      'add',
                                                      user_ids,
                                                      group_names,
                                                      dry_run=dry_run)
    def remove_launch_permissions(self, user_ids=None, group_names=None,
                                  dry_run=False):
        """Revoke launch permission from the given users and/or groups."""
        return self.connection.modify_image_attribute(self.id,
                                                      'launchPermission',
                                                      'remove',
                                                      user_ids,
                                                      group_names,
                                                      dry_run=dry_run)
    def reset_launch_attributes(self, dry_run=False):
        """Reset launchPermission to its default value."""
        return self.connection.reset_image_attribute(
            self.id,
            'launchPermission',
            dry_run=dry_run
        )
    def get_kernel(self, dry_run=False):
        """Return the kernel id associated with this image."""
        img_attrs = self.connection.get_image_attribute(
            self.id,
            'kernel',
            dry_run=dry_run
        )
        return img_attrs.kernel
    def get_ramdisk(self, dry_run=False):
        """Return the ramdisk id associated with this image."""
        img_attrs = self.connection.get_image_attribute(
            self.id,
            'ramdisk',
            dry_run=dry_run
        )
        return img_attrs.ramdisk
class ImageAttribute(object):
    """Parsed DescribeImageAttribute response; values collect in ``attrs``."""

    # Response element name -> key under which values accumulate in attrs.
    _LIST_ATTRS = {'group': 'groups',
                   'userId': 'user_ids',
                   'productCode': 'product_codes'}

    def __init__(self, parent=None):
        self.name = None
        self.kernel = None
        self.ramdisk = None
        self.attrs = {}

    def startElement(self, name, attrs, connection):
        """SAX hook: hand nested blockDeviceMapping off to its own parser."""
        if name != 'blockDeviceMapping':
            return None
        self.attrs['block_device_mapping'] = BlockDeviceMapping()
        return self.attrs['block_device_mapping']

    def endElement(self, name, value, connection):
        """SAX hook: store each element's value on the right attribute."""
        if name == 'launchPermission':
            self.name = 'launch_permission'
        elif name in self._LIST_ATTRS:
            key = self._LIST_ATTRS[name]
            self.attrs.setdefault(key, []).append(value)
        elif name == 'imageId':
            self.image_id = value
        elif name == 'kernel':
            self.kernel = value
        elif name == 'ramdisk':
            self.ramdisk = value
        else:
            # Unknown elements become dynamic attributes.
            setattr(self, name, value)
class CopyImage(object):
    """Parsed CopyImage response; captures the id of the new image."""

    def __init__(self, parent=None):
        self._parent = parent
        self.image_id = None

    def startElement(self, name, attrs, connection):
        # The response has no nested elements worth descending into.
        pass

    def endElement(self, name, value, connection):
        if name != 'imageId':
            return
        self.image_id = value
| mit |
grlee77/numpy | benchmarks/benchmarks/common.py | 2 | 2573 | import numpy
import random
# Various pre-crafted datasets/variables for testing
# !!! Must not be changed -- only appended !!!
# while testing numpy we better not rely on numpy to produce random
# sequences
random.seed(1)
# but will seed it nevertheless
numpy.random.seed(1)
# dimensions of the full-size test squares
nx, ny = 1000, 1000
# reduced squares based on indexes_rand, primarily for testing more
# time-consuming functions (ufunc, linalg, etc)
nxs, nys = 100, 100
# a set of interesting types to test
TYPES1 = [
    'int16', 'float16',
    'int32', 'float32',
    'int64', 'float64', 'complex64',
    'longfloat', 'complex128',
]
# NOTE(review): numpy.typeDict is deprecated in newer numpy releases
# (numpy.sctypeDict is the documented spelling) -- confirm the minimum
# supported numpy version before changing.
if 'complex256' in numpy.typeDict:
    TYPES1.append('complex256')
def memoize(func):
    """Cache the first call's result of a zero-argument function."""
    _cache = {}

    def wrapper():
        if 'value' not in _cache:
            _cache['value'] = func()
        return _cache['value']
    return wrapper
# values which will be used to construct our sample data matrices
# replicate 10 times to speed up initial imports of this helper
# and generate some redundancy
@memoize
def get_values():
    """Return the shared pool of nx*ny random values (seeded, tiled 10x)."""
    rng = numpy.random.RandomState(1)
    base = rng.uniform(0, 100, size=nx * ny // 10)
    return numpy.tile(base, 10)
@memoize
def get_squares():
    """Return an (nx, ny) matrix of the shared values for each dtype in TYPES1."""
    values = get_values()
    squares = {}
    for type_name in TYPES1:
        mat = numpy.array(values,
                          dtype=getattr(numpy, type_name)).reshape((nx, ny))
        if type_name.startswith('complex'):
            # give complex matrices a non-degenerate imaginary part, built
            # from the original data transposed
            mat = mat + mat.T * 1j
        squares[type_name] = mat
    return squares
@memoize
def get_squares_():
    """Return the top-left (nxs, nys) corner of each full-size square."""
    return {dtype: full[:nxs, :nys]
            for dtype, full in get_squares().items()}
@memoize
def get_vectors():
    """Return the first row of each square matrix, keyed by dtype name."""
    return {dtype: square[0]
            for dtype, square in get_squares().items()}
@memoize
def get_indexes():
    """Return indexes 0..nx-1 with two entries dropped (not all items)."""
    idx = list(range(nx))
    # the second deletion happens after the first shift, so the removed
    # original values are 5 and 96 -- same as the historical pop() pair
    del idx[5]
    del idx[95]
    return numpy.array(idx)
@memoize
def get_indexes_rand():
    """Return get_indexes() shuffled deterministically (private seed 1)."""
    shuffled = get_indexes().tolist()  # copy
    random.Random(1).shuffle(shuffled)  # in-place shuffle
    return numpy.array(shuffled)
@memoize
def get_indexes_():
    """Return the subset of get_indexes() below nxs (for the small squares)."""
    idx = get_indexes()
    return idx[idx < nxs]
@memoize
def get_indexes_rand_():
    """Return the subset of get_indexes_rand() below nxs."""
    shuffled = get_indexes_rand()
    return shuffled[shuffled < nxs]
class Benchmark:
    """Empty base class for asv benchmark suites; intentionally a no-op."""
    pass
| bsd-3-clause |
antoan2/incubator-mxnet | tests/python/unittest/test_model_parallel.py | 50 | 2560 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import mxnet as mx
def reldiff(a, b):
    """Return the L1 relative difference sum(|a - b|) / sum(|a|).

    Returns 0 when the arrays are identical (even if both are all zeros).
    Returns inf when the reference `a` is all zeros but `b` differs, instead
    of triggering a divide-by-zero RuntimeWarning as the naive division does.
    """
    diff = np.sum(np.abs(a - b))
    norm = np.sum(np.abs(a))
    if diff == 0:
        return 0
    if norm == 0:
        # non-zero difference against an all-zero reference: maximally different
        return np.inf
    return diff / norm
def test_chain():
    """Check that a symbol split across two device groups gives the same
    forward/backward results as running it on a single context.

    exec1 binds with group2ctx mapping 'dev1' -> cpu(0) and 'dev2' -> cpu(1),
    so the graph executes across both contexts; exec2 binds copies of the
    same arrays entirely on cpu(0) as the reference.
    """
    ctx1 = mx.cpu(0)
    ctx2 = mx.cpu(1)
    n = 2
    data1 = mx.sym.Variable('data1')
    data2 = mx.sym.Variable('data2')
    data3 = mx.sym.Variable('data3')
    # net = (data1 + data2) * 3 computed in group 'dev1' ...
    with mx.AttrScope(ctx_group='dev1'):
        net = data1 + data2
        net = net * 3
    # ... then + data3 computed in group 'dev2' (forces a cross-device copy)
    with mx.AttrScope(ctx_group='dev2'):
        net = net + data3
    arr = []
    arr_grad = []
    shape = (4, 5)
    # first n arrays allocated on ctx1, the last one on ctx2,
    # matching the device-group assignment above
    with mx.Context(ctx1):
        for i in range(n):
            arr.append(mx.nd.empty(shape))
            arr_grad.append(mx.nd.empty(shape))
    with mx.Context(ctx2):
        arr.append(mx.nd.empty(shape))
        arr_grad.append(mx.nd.empty(shape))
    exec1 = net.bind(ctx1,
                     args=arr,
                     args_grad=arr_grad,
                     group2ctx={'dev1': ctx1, 'dev2': ctx2})
    arr[0][:] = 1.0
    arr[1][:] = 2.0
    arr[2][:] = 3.0
    # reference executor: identical data, everything on ctx1
    arr2 = [a.copyto(ctx1) for a in arr]
    arr_grad2 = [a.copyto(ctx1) for a in arr_grad]
    exec2 = net.bind(ctx1,
                     args=arr2,
                     args_grad=arr_grad2)
    # Show the execution plan that involves copynode
    print(exec1.debug_str())
    exec1.forward(is_train=True)
    exec2.forward(is_train=True)
    assert reldiff(exec1.outputs[0].asnumpy(), exec2.outputs[0].asnumpy()) < 1e-6
    out_grad = mx.nd.empty(shape, ctx1)
    out_grad[:] = 1.0
    exec1.backward([out_grad])
    exec2.backward([out_grad.copyto(ctx1)])
    # gradients must agree between the split and single-context executors
    for a, b in zip(arr_grad, arr_grad2):
        assert reldiff(a.asnumpy(), b.asnumpy()) < 1e-6
# Allow running this test file directly as a script.
if __name__ == '__main__':
    test_chain()
| apache-2.0 |
igio/webfaction-meteor | deploy.py | 1 | 4300 | #!/usr/bin/env python3
import os
import subprocess
import paramiko
import argparse
import json
# Manage the command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--update', action='store_true', help='Update the application')
# BUG FIX: store_true flags default to False; the old help text claimed "true".
parser.add_argument('-b', '--build', action='store_true', help='If set, build application before update. Defaults to false.')
parser.add_argument('-f', '--file', help='Settings file')
args = parser.parse_args()
# argparse already yields plain booleans; no conditional copying needed
update = args.update
build = args.build
# Read the settings from file
with open(args.file) as settings_file:
    settings = json.load(settings_file)
# Set up some variables for improved legibility
local = settings['local']
server = settings['server']
db = settings['db']
##
# Steps
#
# Build the app
# Upload app at appropriate locations
# Unpack
# Install dependencies
# Generate and upload the startup file
# Run the application
##
# Build the app
temp_folder = os.path.expanduser('~/%s/%s_build' % (local['path'], local['app']))
if build:
    cmds = [
        'cd %s/%s' % (local['path'], local['app']),
        'meteor build %s --architecture os.linux.x86_64 --server %s' % (temp_folder, server['url'])
    ]
    print('Building application...')
    output = subprocess.check_output(";".join(cmds), shell=True)
    print(output.decode(encoding='utf-8'))
# Connect to server and upload the app built
print('Connecting to the server...')
conn = paramiko.SSHClient()
conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
conn.connect(server['remote'], username=server['user'], password=server['password'])
print('Connection established!')
print('Starting SFTP session...')
sftp = conn.open_sftp()
print('SFTP session open!')
sftp.chdir('webapps/%s' % server['app'])
print('Start uploading app archive...')
sftp.put('%s/%s.tar.gz' % (temp_folder, local['app']), '%s.tar.gz' % local['app'])
print('Upload done!')
# Unpack
print('Extracting archive files...')
cmds = [
    'cd ~/webapps/%s' % server['app'],
    'rm -rf bundle',
    'tar -zxf %s.tar.gz' % local['app'],
    'rm %s.tar.gz' % local['app']
]
si, so, se = conn.exec_command(';'.join(cmds))
print(''.join(so.readlines()))
print('Files extracted!')
# Install dependencies
print('Installing dependencies...')
cmds = [
    'cd ~/webapps/%s/bundle/programs/server' % server['app'],
    'PATH=~/webapps/%s/bin/:$PATH' % server['app'],
    'npm install --silent'
]
si, so, se = conn.exec_command(';'.join(cmds))
print(''.join(so.readlines()))
print('Dependencies installed!')
# Generate and upload the startup file
if not update:
    print('Generate startup file...')
    base = '/home/%s/webapps/%s' % (server['user'], server['app'])
    # NOTE(review): the "gionas" user name below is hard-coded; presumably it
    # should come from server['user'] -- confirm before changing behavior.
    lines = [
        '#!/bin/sh',
        'mkdir -p %s/run' % base,
        'export MONGO_URL=%s' % db['mongodb'],
        'export ROOT_URL=%s' % server['url'],
        'export PORT=%s' % server['port'],
        'pid=$(/sbin/pidof %s/bin/node)' % base,
        'if echo "$pid" | grep -q " "; then',
        ' pid=""',
        'fi',
        'if [ -n "$pid" ]; then',
        ' user=$(ps -p $pid -o user:20 | tail -n 1)',
        ' if [ $user = "gionas" ]; then',
        # BUG FIX: "exit(0)" is not valid sh syntax; "exit 0" is.
        ' exit 0',
        ' fi',
        'fi',
        'nohup %s/bin/node %s/bundle/main.js > /dev/null 2>&1 &' % (base, base),
        '/sbin/pidof %s/bin/node > %s/run/node.pid' % (base, base)
    ]
    # BUG FIX: the file was never closed/flushed before sftp.put() read it;
    # the context manager guarantees the bytes are on disk first.
    with open('%s/start' % temp_folder, 'w') as start_file:
        start_file.write('\n'.join(lines))
    print('Remove the current start file...')
    cmds = [
        'cd ~/webapps/%s/bin' % server['app'],
        'rm start'
    ]
    si, so, se = conn.exec_command(';'.join(cmds))
    # BUG FIX: exec_command()'s stderr channel object is always truthy, so the
    # old "if not se:" check unconditionally printed stderr and exit(1)ed.
    # Inspect the actual stderr *content* instead.
    err = se.read().decode(encoding='utf-8')
    if not err:
        print('Start file removed!')
    else:
        print(err)
        exit(1)
    print('Uploading new start file...')
    sftp.chdir('webapps/%s/bin' % server['app'])
    # BUG FIX: SFTPClient.put() requires the remote path as a second argument;
    # the original call raised TypeError.
    sftp.put('%s/start' % temp_folder, 'start')
    print('Start file uploaded!')
# Start the application (if everything worked out fine)
print('(re)Starting the app...')
cmds = [
    '~/webapps/%s/bin/stop' % server['app'],
    '~/webapps/%s/bin/start' % server['app']
]
si, so, se = conn.exec_command(';'.join(cmds))
print('Meteor application started')
conn.close()
print('All done! Good bye!')
noodle-learns-programming/wagtail | wagtail/wagtailsearch/tests/test_frontend.py | 27 | 5312 | from django.test import TestCase
from django.core.urlresolvers import reverse
from django.core import paginator
from wagtail.wagtailcore.models import Page
from wagtail.wagtailsearch.models import Query
from wagtail.tests.testapp.models import EventPage
class TestSearchView(TestCase):
    """Tests for the wagtailsearch front-end search view: empty GET, a real
    query, and pagination edge cases (invalid, zero, and out-of-range pages).
    """
    fixtures = ['test.json']
    def test_get(self):
        """A GET with no query renders the template with no results/query."""
        response = self.client.get(reverse('wagtailsearch_search'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailsearch/search_results.html')
        # Check that search_results/query are set to None
        self.assertIsNone(response.context['search_results'])
        self.assertIsNone(response.context['query'])
    def test_search(self):
        """Searching 'Christmas' finds exactly the christmas event page."""
        response = self.client.get(reverse('wagtailsearch_search') + '?q=Christmas')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailsearch/search_results.html')
        self.assertEqual(response.context['query_string'], "Christmas")
        # Check that search_results is an instance of paginator.Page
        self.assertIsInstance(response.context['search_results'], paginator.Page)
        # Check that the christmas page was in the results (and is the only result)
        search_results = response.context['search_results'].object_list
        christmas_event_page = Page.objects.get(url_path='/home/events/christmas/')
        self.assertEqual(list(search_results), [christmas_event_page])
        # Check the query object
        self.assertIsInstance(response.context['query'], Query)
        query = response.context['query']
        self.assertEqual(query.query_string, "christmas")
    # Decorator (plain function, used only inside this class body): seeds 100
    # event pages before running the wrapped test so pagination has data.
    def pagination_test(test):
        def wrapper(*args, **kwargs):
            # Create some pages
            event_index = Page.objects.get(url_path='/home/events/')
            for i in range(100):
                event = EventPage(
                    title="Event " + str(i),
                    slug='event-' + str(i),
                    live=True,
                )
                event_index.add_child(instance=event)
            return test(*args, **kwargs)
        return wrapper
    @pagination_test
    def test_get_first_page(self):
        response = self.client.get(reverse('wagtailsearch_search') + '?q=Event&page=1')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailsearch/search_results.html')
        # Test that we got the first page
        search_results = response.context['search_results']
        self.assertEqual(search_results.number, 1)
    @pagination_test
    def test_get_10th_page(self):
        response = self.client.get(reverse('wagtailsearch_search') + '?q=Event&page=10')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailsearch/search_results.html')
        # Test that we got the tenth page
        search_results = response.context['search_results']
        self.assertEqual(search_results.number, 10)
    @pagination_test
    def test_get_invalid_page(self):
        response = self.client.get(reverse('wagtailsearch_search') + '?q=Event&page=Not a Page')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailsearch/search_results.html')
        # Test that we got the first page
        search_results = response.context['search_results']
        self.assertEqual(search_results.number, 1)
    @pagination_test
    def test_get_out_of_range_page(self):
        response = self.client.get(reverse('wagtailsearch_search') + '?q=Event&page=9999')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailsearch/search_results.html')
        # Test that we got the last page
        search_results = response.context['search_results']
        self.assertEqual(search_results.number, search_results.paginator.num_pages)
    @pagination_test
    def test_get_zero_page(self):
        response = self.client.get(reverse('wagtailsearch_search') + '?q=Event&page=0')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailsearch/search_results.html')
        # Test that we got the last page (the old comment said "first page",
        # but the assertion below checks against paginator.num_pages)
        search_results = response.context['search_results']
        self.assertEqual(search_results.number, search_results.paginator.num_pages)
    @pagination_test
    def test_get_10th_page_backwards_compatibility_with_p(self):
        response = self.client.get(reverse('wagtailsearch_search') + '?q=Event&p=10')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailsearch/search_results.html')
        # Test that we got the tenth page
        search_results = response.context['search_results']
        self.assertEqual(search_results.number, 10)
class TestSuggestionsView(TestCase):
    """Tests for the search suggestions endpoint at /search/suggest/."""
    def get(self, params=None):
        """GET the suggestions view.

        `params` defaults to None instead of the original mutable default
        `params={}`, avoiding one dict shared across every call.
        """
        return self.client.get('/search/suggest/', params if params is not None else {})
    def test_simple(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        # TODO: Check that a valid JSON document was returned
    def test_search(self):
        response = self.get({'q': "Hello"})
        self.assertEqual(response.status_code, 200)
| bsd-3-clause |
rooi/CouchPotatoServer | libs/suds/umx/__init__.py | 203 | 1811 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides modules containing classes to support
unmarshalling (XML).
"""
from suds.sudsobject import Object
class Content(Object):
    """
    Unmarshaller working state for one XML node.

    @ivar node: The content source node.
    @type node: L{sax.element.Element}
    @ivar data: The (optional) content data.
    @type data: L{Object}
    @ivar text: The (optional) content (xml) text.
    @type text: basestring
    """
    # Class-level list of extra attribute names that are lazily created
    # (defaulted to None) on first access; see __getattr__ below.
    extensions = []
    def __init__(self, node, **kwargs):
        # Any keyword arguments become attributes of this content object.
        Object.__init__(self)
        self.node = node
        self.data = None
        self.text = None
        for k,v in kwargs.items():
            setattr(self, k, v)
    def __getattr__(self, name):
        # Called by Python only when normal attribute lookup fails; the
        # explicit __dict__ check keeps the fallback path self-contained.
        if name not in self.__dict__:
            if name in self.extensions:
                # Registered extension attribute: materialize it as None.
                v = None
                setattr(self, name, v)
            else:
                raise AttributeError, \
                    'Content has no attribute %s' % name
        else:
            v = self.__dict__[name]
        return v
talbrecht/pism_pik | test/vnreport.py | 1 | 8967 | #!/usr/bin/env python
from pylab import close, figure, clf, hold, plot, xlabel, ylabel, xticks, yticks, axis, legend, title, grid, show, savefig
from numpy import array, polyfit, polyval, log10, floor, ceil, unique
import sys
try:
from netCDF4 import Dataset as NC
except:
print "netCDF4 is not installed!"
sys.exit(1)
class Plotter:
    """Draws log-log convergence plots of PISM verification errors.

    Reads error variables from a NetCDF report file (produced by vfnow.py)
    and, for each requested test, plots errors against grid spacing with a
    fitted line whose slope is the empirical convergence rate.
    (Python 2 code: print statements and tuple-parameter lambdas.)
    """
    def __init__(self, save_figures, nc, file_format):
        # save_figures: if true, write each figure to a file instead of only
        #   showing it; nc: open NetCDF dataset; file_format: e.g. "png".
        self.save_figures = save_figures
        self.nc = nc
        self.file_format = file_format
    def plot(self, x, vars, testname, plot_title):
        """Plot log10(error) vs log10(grid spacing) for one test.

        x: name of the spacing variable ('dx', 'dy', 'dz' or 'dzb');
        vars: list of error variable names to plot on one figure;
        testname: single-letter PISM test name used to select rows.
        """
        # This mask lets us choose data corresponding to a particular test:
        test = array(map(chr, self.nc.variables['test'][:]))
        mask = (test == testname)
        # If we have less than 2 points to plot, then bail.
        if (sum(mask) < 2):
            print "Skipping Test %s %s (not enough data to plot)" % (testname, plot_title)
            return
        # Get the independent variable and transform it. Note that everywhere here
        # I assume that neither dx (dy, dz) nor errors can be zero or negative.
        dx = self.nc.variables[x][mask]
        dim = log10(dx)
        figure(figsize=(10, 6))
        clf()
        hold(True)
        colors = ['red', 'blue', 'green', 'black', 'brown', 'cyan']
        for (v, c) in zip(vars, colors):
            # Get a particular variable, transform and fit a line through it:
            data = log10(self.nc.variables[v][mask])
            p = polyfit(dim, data, 1)
            # Try to get the long_name, use short_name if it fails:
            try:
                name = self.nc.variables[v].long_name
            except:
                name = v
            # Create a label for the independent variable:
            if (x == "dx"):
                dim_name = "\Delta x"
            if (x == "dy"):
                dim_name = "\Delta y"
            if (x == "dz"):
                dim_name = "\Delta z"
            if (x == "dzb"):
                dim_name = "\Delta z_{bed}"
            # Variable label (slope p[0] is the convergence rate):
            var_label = "%s, $O(%s^{%1.2f})$" % (name, dim_name, p[0])
            print "Test {} {}: convergence rate: O(dx^{:1.4f})".format(testname, name, p[0])
            # Plot errors and the linear fit:
            plot(dim, data, label=var_label, marker='o', color=c)
            plot(dim, polyval(p, dim), ls="--", color=c)
        # Shrink axes, then expand vertically to have integer powers of 10:
        axis('tight')
        _, _, ymin, ymax = axis()
        axis(ymin=floor(ymin), ymax=ceil(ymax))
        # Switch to km if dx (dy, dz) are big:
        units = self.nc.variables[x].units
        if (dx.min() > 1000.0 and (units == "meters")):
            dx = dx / 1000.0
            units = "km"
        # Round grid spacing in x-ticks:
        xticks(dim, map(lambda(x): "%d" % x, dx))
        xlabel("$%s$ (%s)" % (dim_name, units))
        # Use default (figured out by matplotlib) locations, but change labels for y-ticks:
        loc, _ = yticks()
        yticks(loc, map(lambda(x): "$10^{%1.1f}$" % x, loc))
        # Make sure that all variables given have the same units:
        try:
            ylabels = array(map(lambda(x): self.nc.variables[x].units, vars))
            if (any(ylabels != ylabels[0])):
                print "Incompatible units!"
            else:
                ylabel(ylabels[0])
        except:
            pass
        # Legend, grid and the title:
        legend(loc='best', borderpad=1, labelspacing=0.5, handletextpad=0.75, handlelength=0.02)
        # prop = FontProperties(size='smaller'),
        grid(True)
        title("Test %s %s (%s)" % (testname, plot_title, self.nc.source))
        if self.save_figures:
            filename = "%s_%s_%s.%s" % (self.nc.source.replace(" ", "_"),
                                        testname.replace(" ", "_"),
                                        plot_title.replace(" ", "_"),
                                        self.file_format)
            savefig(filename)
    def plot_tests(self, list_of_tests):
        """Produce the full set of error plots for each test name given."""
        for test_name in list_of_tests:
            # thickness, volume and eta errors:
            if test_name in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'L']:
                self.plot('dx', ["maximum_thickness", "average_thickness"], test_name, "ice thickness errors")
                self.plot('dx', ["relative_volume"], test_name, "relative ice volume errors")
                self.plot('dx', ["relative_max_eta"], test_name, r"relative max eta errors")
            # errors that are reported for test E only:
            if (test_name == 'E'):
                self.plot('dx', ["maximum_basal_velocity", "average_basal_velocity"], 'E', r"basal velocity errors")
                self.plot('dx', ["maximum_basal_u", "maximum_basal_v"], 'E', "basal velocity (ub and vb) errors")
                self.plot('dx', ["relative_basal_velocity"], 'E', "relative basal velocity errors")
            # F and G temperature, sigma and velocity errors:
            if test_name in ['F', 'G']:
                self.plot('dx', ["maximum_sigma", "average_sigma"],
                          test_name, "strain heating errors")
                self.plot('dx', ["maximum_temperature", "average_temperature",
                                 "maximum_basal_temperature", "average_basal_temperature"],
                          test_name, "ice temperature errors")
                self.plot('dx', ["maximum_surface_velocity", "maximum_surface_w"],
                          test_name, "maximum ice surface velocity errors")
                self.plot('dx', ["average_surface_velocity", "average_surface_w"],
                          test_name, "average ice surface velocity errors")
            # test I: plot only the u component
            if test_name == 'I':
                self.plot('dy', ["relative_velocity"],
                          test_name, "relative velocity errors")
                self.plot('dy', ["maximum_u", "average_u"],
                          test_name, "velocity errors")
            # tests J and M:
            if test_name in ['J', 'M']:
                self.plot('dx', ["relative_velocity"],
                          test_name, "relative velocity errors")
                self.plot('dx', ["max_velocity", "maximum_u", "average_u", "maximum_v", "average_v"],
                          test_name, "velocity errors")
            # test K temperature errors:
            if (test_name == 'K'):
                self.plot('dz', ["maximum_temperature", "average_temperature",
                                 "maximum_bedrock_temperature", "average_bedrock_temperature"],
                          'K', "temperature errors")
            # test O temperature and basal melt rate errors:
            if (test_name == 'O'):
                # NOTE(review): this branch passes 'K' as the test label for the
                # temperature plot -- looks like a copy-paste from the test-K
                # branch above; confirm whether 'O' was intended.
                self.plot('dz', ["maximum_temperature", "average_temperature",
                                 "maximum_bedrock_temperature", "average_bedrock_temperature"],
                          'K', "temperature errors")
                self.plot('dz', ["maximum_basal_melt_rate"],
                          'O', "basal melt rate errors")
            # test V: plot only the u component
            if test_name == 'V':
                self.plot('dx', ["relative_velocity"],
                          test_name, "relative velocity errors")
                self.plot('dx', ["maximum_u", "average_u"],
                          test_name, "velocity errors")
from argparse import ArgumentParser
parser = ArgumentParser()
parser.description = """Plot script for PISM verification results."""
parser.add_argument("filename",
help="The NetCDF error report file name, usually produces by running vfnow.py")
parser.add_argument("-t", nargs="+", dest="tests_to_plot", default=None,
help="Test results to plot (space-delimited list)")
parser.add_argument("--save_figures", dest="save_figures", action="store_true",
help="Save figures to .png files")
parser.add_argument("--file_format", dest="file_format", default="png",
help="File format for --save_figures (png, pdf, jpg, ...)")
options = parser.parse_args()
input_file = NC(options.filename, 'r')
available_tests = unique(array(map(chr, input_file.variables['test'][:])))
tests_to_plot = options.tests_to_plot
if len(available_tests) == 1:
if tests_to_plot == None:
tests_to_plot = available_tests
else:
if (tests_to_plot == None):
print """Please choose tests to plot using the -t option.
(Input file %s has reports for tests %s available.)""" % (input, str(available_tests))
sys.exit(0)
if (tests_to_plot[0] == "all"):
tests_to_plot = available_tests
close('all')
p = Plotter(options.save_figures, input_file, options.file_format)
p.plot_tests(tests_to_plot)
try:
# show() will break if we didn't plot anything
if not options.save_figures:
show()
except:
pass
| gpl-3.0 |
ppapadeas/wprevents | vendor-local/lib/python/unidecode/x0ca.py | 253 | 5007 | data = (
'jjael', # 0x00
'jjaelg', # 0x01
'jjaelm', # 0x02
'jjaelb', # 0x03
'jjaels', # 0x04
'jjaelt', # 0x05
'jjaelp', # 0x06
'jjaelh', # 0x07
'jjaem', # 0x08
'jjaeb', # 0x09
'jjaebs', # 0x0a
'jjaes', # 0x0b
'jjaess', # 0x0c
'jjaeng', # 0x0d
'jjaej', # 0x0e
'jjaec', # 0x0f
'jjaek', # 0x10
'jjaet', # 0x11
'jjaep', # 0x12
'jjaeh', # 0x13
'jjya', # 0x14
'jjyag', # 0x15
'jjyagg', # 0x16
'jjyags', # 0x17
'jjyan', # 0x18
'jjyanj', # 0x19
'jjyanh', # 0x1a
'jjyad', # 0x1b
'jjyal', # 0x1c
'jjyalg', # 0x1d
'jjyalm', # 0x1e
'jjyalb', # 0x1f
'jjyals', # 0x20
'jjyalt', # 0x21
'jjyalp', # 0x22
'jjyalh', # 0x23
'jjyam', # 0x24
'jjyab', # 0x25
'jjyabs', # 0x26
'jjyas', # 0x27
'jjyass', # 0x28
'jjyang', # 0x29
'jjyaj', # 0x2a
'jjyac', # 0x2b
'jjyak', # 0x2c
'jjyat', # 0x2d
'jjyap', # 0x2e
'jjyah', # 0x2f
'jjyae', # 0x30
'jjyaeg', # 0x31
'jjyaegg', # 0x32
'jjyaegs', # 0x33
'jjyaen', # 0x34
'jjyaenj', # 0x35
'jjyaenh', # 0x36
'jjyaed', # 0x37
'jjyael', # 0x38
'jjyaelg', # 0x39
'jjyaelm', # 0x3a
'jjyaelb', # 0x3b
'jjyaels', # 0x3c
'jjyaelt', # 0x3d
'jjyaelp', # 0x3e
'jjyaelh', # 0x3f
'jjyaem', # 0x40
'jjyaeb', # 0x41
'jjyaebs', # 0x42
'jjyaes', # 0x43
'jjyaess', # 0x44
'jjyaeng', # 0x45
'jjyaej', # 0x46
'jjyaec', # 0x47
'jjyaek', # 0x48
'jjyaet', # 0x49
'jjyaep', # 0x4a
'jjyaeh', # 0x4b
'jjeo', # 0x4c
'jjeog', # 0x4d
'jjeogg', # 0x4e
'jjeogs', # 0x4f
'jjeon', # 0x50
'jjeonj', # 0x51
'jjeonh', # 0x52
'jjeod', # 0x53
'jjeol', # 0x54
'jjeolg', # 0x55
'jjeolm', # 0x56
'jjeolb', # 0x57
'jjeols', # 0x58
'jjeolt', # 0x59
'jjeolp', # 0x5a
'jjeolh', # 0x5b
'jjeom', # 0x5c
'jjeob', # 0x5d
'jjeobs', # 0x5e
'jjeos', # 0x5f
'jjeoss', # 0x60
'jjeong', # 0x61
'jjeoj', # 0x62
'jjeoc', # 0x63
'jjeok', # 0x64
'jjeot', # 0x65
'jjeop', # 0x66
'jjeoh', # 0x67
'jje', # 0x68
'jjeg', # 0x69
'jjegg', # 0x6a
'jjegs', # 0x6b
'jjen', # 0x6c
'jjenj', # 0x6d
'jjenh', # 0x6e
'jjed', # 0x6f
'jjel', # 0x70
'jjelg', # 0x71
'jjelm', # 0x72
'jjelb', # 0x73
'jjels', # 0x74
'jjelt', # 0x75
'jjelp', # 0x76
'jjelh', # 0x77
'jjem', # 0x78
'jjeb', # 0x79
'jjebs', # 0x7a
'jjes', # 0x7b
'jjess', # 0x7c
'jjeng', # 0x7d
'jjej', # 0x7e
'jjec', # 0x7f
'jjek', # 0x80
'jjet', # 0x81
'jjep', # 0x82
'jjeh', # 0x83
'jjyeo', # 0x84
'jjyeog', # 0x85
'jjyeogg', # 0x86
'jjyeogs', # 0x87
'jjyeon', # 0x88
'jjyeonj', # 0x89
'jjyeonh', # 0x8a
'jjyeod', # 0x8b
'jjyeol', # 0x8c
'jjyeolg', # 0x8d
'jjyeolm', # 0x8e
'jjyeolb', # 0x8f
'jjyeols', # 0x90
'jjyeolt', # 0x91
'jjyeolp', # 0x92
'jjyeolh', # 0x93
'jjyeom', # 0x94
'jjyeob', # 0x95
'jjyeobs', # 0x96
'jjyeos', # 0x97
'jjyeoss', # 0x98
'jjyeong', # 0x99
'jjyeoj', # 0x9a
'jjyeoc', # 0x9b
'jjyeok', # 0x9c
'jjyeot', # 0x9d
'jjyeop', # 0x9e
'jjyeoh', # 0x9f
'jjye', # 0xa0
'jjyeg', # 0xa1
'jjyegg', # 0xa2
'jjyegs', # 0xa3
'jjyen', # 0xa4
'jjyenj', # 0xa5
'jjyenh', # 0xa6
'jjyed', # 0xa7
'jjyel', # 0xa8
'jjyelg', # 0xa9
'jjyelm', # 0xaa
'jjyelb', # 0xab
'jjyels', # 0xac
'jjyelt', # 0xad
'jjyelp', # 0xae
'jjyelh', # 0xaf
'jjyem', # 0xb0
'jjyeb', # 0xb1
'jjyebs', # 0xb2
'jjyes', # 0xb3
'jjyess', # 0xb4
'jjyeng', # 0xb5
'jjyej', # 0xb6
'jjyec', # 0xb7
'jjyek', # 0xb8
'jjyet', # 0xb9
'jjyep', # 0xba
'jjyeh', # 0xbb
'jjo', # 0xbc
'jjog', # 0xbd
'jjogg', # 0xbe
'jjogs', # 0xbf
'jjon', # 0xc0
'jjonj', # 0xc1
'jjonh', # 0xc2
'jjod', # 0xc3
'jjol', # 0xc4
'jjolg', # 0xc5
'jjolm', # 0xc6
'jjolb', # 0xc7
'jjols', # 0xc8
'jjolt', # 0xc9
'jjolp', # 0xca
'jjolh', # 0xcb
'jjom', # 0xcc
'jjob', # 0xcd
'jjobs', # 0xce
'jjos', # 0xcf
'jjoss', # 0xd0
'jjong', # 0xd1
'jjoj', # 0xd2
'jjoc', # 0xd3
'jjok', # 0xd4
'jjot', # 0xd5
'jjop', # 0xd6
'jjoh', # 0xd7
'jjwa', # 0xd8
'jjwag', # 0xd9
'jjwagg', # 0xda
'jjwags', # 0xdb
'jjwan', # 0xdc
'jjwanj', # 0xdd
'jjwanh', # 0xde
'jjwad', # 0xdf
'jjwal', # 0xe0
'jjwalg', # 0xe1
'jjwalm', # 0xe2
'jjwalb', # 0xe3
'jjwals', # 0xe4
'jjwalt', # 0xe5
'jjwalp', # 0xe6
'jjwalh', # 0xe7
'jjwam', # 0xe8
'jjwab', # 0xe9
'jjwabs', # 0xea
'jjwas', # 0xeb
'jjwass', # 0xec
'jjwang', # 0xed
'jjwaj', # 0xee
'jjwac', # 0xef
'jjwak', # 0xf0
'jjwat', # 0xf1
'jjwap', # 0xf2
'jjwah', # 0xf3
'jjwae', # 0xf4
'jjwaeg', # 0xf5
'jjwaegg', # 0xf6
'jjwaegs', # 0xf7
'jjwaen', # 0xf8
'jjwaenj', # 0xf9
'jjwaenh', # 0xfa
'jjwaed', # 0xfb
'jjwael', # 0xfc
'jjwaelg', # 0xfd
'jjwaelm', # 0xfe
'jjwaelb', # 0xff
)
| bsd-3-clause |
ellipsis14/dolfin | demo/undocumented/parallel-refinement/python/demo_parallel-refinement.py | 5 | 1661 | # Copyright (C) 2013 Chris N. Richardson
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2013-04-26
# Last changed: 2013-04-26
from dolfin import *
# Create mesh
mesh = UnitSquareMesh(20, 20)
# Create MeshFunction to hold cell process rank
processes = CellFunction('size_t', mesh, MPI.rank(mesh.mpi_comm()))
# Output cell distribution to VTK file (the same file object collects all
# three snapshots: original, locally-refined, and repartitioned meshes)
file = File("processes.pvd")
file << processes
# Mark all cells on process 0 for refinement
marker = CellFunction('bool', mesh, (MPI.rank(mesh.mpi_comm()) == 0))
# Refine mesh, but keep all new cells on parent process (redistribute=False)
mesh0 = refine(mesh, marker, False)
# Create MeshFunction to hold cell process rank for refined mesh
processes1 = CellFunction('size_t', mesh0, MPI.rank(mesh.mpi_comm()))
file << processes1
# Refine mesh, but this time repartition the mesh after refinement
mesh1 = refine(mesh, marker, True)
# Create MeshFunction to hold cell process rank for refined mesh
processes2 = CellFunction('size_t', mesh1, MPI.rank(mesh.mpi_comm()))
file << processes2
| gpl-3.0 |
team-vigir/vigir_behaviors | behaviors/vigir_behavior_walk_and_grasp_demo/src/vigir_behavior_walk_and_grasp_demo/walk_and_grasp_demo_sm.py | 1 | 13360 | #!/usr/bin/env python
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
import roslib; roslib.load_manifest('vigir_behavior_walk_and_grasp_demo')
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, Logger
from vigir_flexbe_states.tilt_head_state import TiltHeadState
from flexbe_states.input_state import InputState
from flexbe_states.log_state import LogState
from flexbe_states.calculation_state import CalculationState
from flexbe_states.operator_decision_state import OperatorDecisionState
from vigir_flexbe_states.moveit_predefined_pose_state import MoveitPredefinedPoseState
from vigir_flexbe_states.change_control_mode_action_state import ChangeControlModeActionState
from vigir_flexbe_states.footstep_plan_realign_center_state import FootstepPlanRealignCenterState
from vigir_flexbe_states.execute_step_plan_action_state import ExecuteStepPlanActionState
from vigir_flexbe_states.footstep_plan_relative_state import FootstepPlanRelativeState
from vigir_flexbe_states.check_current_control_mode_state import CheckCurrentControlModeState
from vigir_behavior_walk_to_template.walk_to_template_sm import WalktoTemplateSM
from vigir_behavior_manipulation_config.manipulation_config_sm import ManipulationConfigSM
from vigir_behavior_pickup_object.pickup_object_sm import PickupObjectSM
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Tue Jan 20 2015
@author: Philipp Schillinger
'''
class WalkandGraspDemoSM(Behavior):
'''
Demo behavior for the scenario: Walk to the table and grab the power drill.
'''
    def __init__(self):
        """Declare behavior name, parameters and sub-behaviors.

        NOTE: this file is generator-emitted; edits outside the MANUAL tags
        may be lost when the behavior is regenerated.
        """
        super(WalkandGraspDemoSM, self).__init__()
        self.name = 'Walk and Grasp Demo'
        # parameters of this behavior
        self.add_parameter('hand_side', 'left')
        # references to used behaviors
        self.add_behavior(WalktoTemplateSM, 'Walk to Template')
        self.add_behavior(ManipulationConfigSM, 'Manipulation Config')
        self.add_behavior(PickupObjectSM, 'Pickup Object')
        # Additional initialization code can be added inside the following tags
        # [MANUAL_INIT]
        # [/MANUAL_INIT]
# Behavior comments:
# O 144 150 /Use_The_Drill/Lift_Drill
# Should be able to remove this part soon.
def create(self):
# x:787 y:587, x:280 y:361
_state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
_state_machine.userdata.hand_side = self.hand_side
_state_machine.userdata.none = None
_state_machine.userdata.grasp_preference = 0
_state_machine.userdata.step_back_distance = 0.5 # m
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
# x:686 y:240, x:384 y:196
_sm_perform_walking_0 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['none', 'step_back_distance'])
with _sm_perform_walking_0:
# x:64 y:78
OperatableStateMachine.add('Plan_Realign_Feet',
FootstepPlanRealignCenterState(),
transitions={'planned': 'Execute_Realign_Feet', 'failed': 'failed'},
autonomy={'planned': Autonomy.High, 'failed': Autonomy.Low},
remapping={'plan_header': 'plan_header'})
# x:624 y:378
OperatableStateMachine.add('Perform_Step_Back',
ExecuteStepPlanActionState(),
transitions={'finished': 'finished', 'failed': 'failed'},
autonomy={'finished': Autonomy.Low, 'failed': Autonomy.Low},
remapping={'plan_header': 'plan_header'})
# x:328 y:378
OperatableStateMachine.add('Plan_Step_Back',
FootstepPlanRelativeState(direction=FootstepPlanRelativeState.DIRECTION_BACKWARD),
transitions={'planned': 'Perform_Step_Back', 'failed': 'failed'},
autonomy={'planned': Autonomy.High, 'failed': Autonomy.Low},
remapping={'distance': 'step_back_distance', 'plan_header': 'plan_header'})
# x:74 y:190
OperatableStateMachine.add('Execute_Realign_Feet',
ExecuteStepPlanActionState(),
transitions={'finished': 'Wait_For_Stand', 'failed': 'failed'},
autonomy={'finished': Autonomy.Low, 'failed': Autonomy.Low},
remapping={'plan_header': 'plan_header'})
# x:66 y:316
OperatableStateMachine.add('Wait_For_Stand',
CheckCurrentControlModeState(target_mode=CheckCurrentControlModeState.STAND, wait=True),
transitions={'correct': 'Plan_Step_Back', 'incorrect': 'failed'},
autonomy={'correct': Autonomy.Low, 'incorrect': Autonomy.Full},
remapping={'control_mode': 'control_mode'})
# x:733 y:290, x:133 y:290
_sm_back_to_stand_1 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['none'])
with _sm_back_to_stand_1:
# x:66 y:78
OperatableStateMachine.add('Set_Manipulate',
ChangeControlModeActionState(target_mode=ChangeControlModeActionState.MANIPULATE),
transitions={'changed': 'Stand_Posture', 'failed': 'failed'},
autonomy={'changed': Autonomy.High, 'failed': Autonomy.Low})
# x:376 y:78
OperatableStateMachine.add('Stand_Posture',
MoveitPredefinedPoseState(target_pose=MoveitPredefinedPoseState.STAND_POSE, vel_scaling=0.3, ignore_collisions=False, link_paddings={}),
transitions={'done': 'Set_Stand', 'failed': 'Set_Stand'},
autonomy={'done': Autonomy.Low, 'failed': Autonomy.Low},
remapping={'side': 'none'})
# x:666 y:78
OperatableStateMachine.add('Set_Stand',
ChangeControlModeActionState(target_mode=ChangeControlModeActionState.STAND),
transitions={'changed': 'finished', 'failed': 'finished'},
autonomy={'changed': Autonomy.Low, 'failed': Autonomy.Low})
# x:120 y:404, x:298 y:222
_sm_step_back_2 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['hand_side', 'none', 'template_id', 'grasp_preference', 'step_back_distance'])
with _sm_step_back_2:
# x:76 y:28
OperatableStateMachine.add('Go_To_Stand_Posture',
MoveitPredefinedPoseState(target_pose=MoveitPredefinedPoseState.STAND_POSE, vel_scaling=0.1, ignore_collisions=False, link_paddings={}),
transitions={'done': 'Set_To_Stand_Manipulate', 'failed': 'Set_To_Stand_Manipulate'},
autonomy={'done': Autonomy.Low, 'failed': Autonomy.Low},
remapping={'side': 'none'})
# x:66 y:140
OperatableStateMachine.add('Set_To_Stand_Manipulate',
ChangeControlModeActionState(target_mode=ChangeControlModeActionState.STAND_MANIPULATE),
transitions={'changed': 'Perform_Walking', 'failed': 'failed'},
autonomy={'changed': Autonomy.High, 'failed': Autonomy.Low})
# x:84 y:272
OperatableStateMachine.add('Perform_Walking',
_sm_perform_walking_0,
transitions={'finished': 'finished', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'none': 'none', 'step_back_distance': 'step_back_distance'})
# x:755 y:48, x:401 y:428
_sm_preparation_3 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['none'], output_keys=['template_id'])
with _sm_preparation_3:
# x:66 y:164
OperatableStateMachine.add('Head_Look_Straight',
TiltHeadState(desired_tilt=TiltHeadState.STRAIGHT),
transitions={'done': 'Place_Template', 'failed': 'failed'},
autonomy={'done': Autonomy.Low, 'failed': Autonomy.Low})
# x:566 y:163
OperatableStateMachine.add('Request_Template_ID',
InputState(request=InputState.SELECTED_OBJECT_ID, message="Provide the ID of the placed template."),
transitions={'received': 'finished', 'aborted': 'failed', 'no_connection': 'Log_No_Connection', 'data_error': 'Log_Data_Error'},
autonomy={'received': Autonomy.High, 'aborted': Autonomy.High, 'no_connection': Autonomy.Low, 'data_error': Autonomy.Low},
remapping={'data': 'template_id'})
# x:287 y:204
OperatableStateMachine.add('Log_No_Connection',
LogState(text="Have no connection to OCS!", severity=Logger.REPORT_ERROR),
transitions={'done': 'Decide_Input'},
autonomy={'done': Autonomy.Off})
# x:344 y:144
OperatableStateMachine.add('Log_Data_Error',
LogState(text="Received wrong data format!", severity=Logger.REPORT_ERROR),
transitions={'done': 'Decide_Input'},
autonomy={'done': Autonomy.Off})
# x:483 y:44
OperatableStateMachine.add('Fake_Input',
CalculationState(calculation=lambda x: 0),
transitions={'done': 'finished'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'none', 'output_value': 'template_id'})
# x:236 y:40
OperatableStateMachine.add('Decide_Input',
OperatorDecisionState(outcomes=['fake_id', 'ocs_request'], hint="How do you want to provide the template?", suggestion=None),
transitions={'fake_id': 'Fake_Input', 'ocs_request': 'Request_Template_ID'},
autonomy={'fake_id': Autonomy.Full, 'ocs_request': Autonomy.Full})
# x:78 y:40
OperatableStateMachine.add('Place_Template',
LogState(text="Please place the drill template.", severity=Logger.REPORT_HINT),
transitions={'done': 'Decide_Input'},
autonomy={'done': Autonomy.Full})
with _state_machine:
# x:44 y:72
OperatableStateMachine.add('Preparation',
_sm_preparation_3,
transitions={'finished': 'Ask_Perform_Walking', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'none': 'none', 'template_id': 'template_id'})
# x:547 y:571
OperatableStateMachine.add('Step_Back',
_sm_step_back_2,
transitions={'finished': 'finished', 'failed': 'Back_To_Stand'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'hand_side': 'hand_side', 'none': 'none', 'template_id': 'template_id', 'grasp_preference': 'grasp_preference', 'step_back_distance': 'step_back_distance'})
# x:284 y:78
OperatableStateMachine.add('Ask_Perform_Walking',
OperatorDecisionState(outcomes=['walk', 'stand'], hint="Does the robot need to walk to the table?", suggestion='walk'),
transitions={'walk': 'Walk to Template', 'stand': 'Manipulation Config'},
autonomy={'walk': Autonomy.High, 'stand': Autonomy.Full})
# x:531 y:22
OperatableStateMachine.add('Walk to Template',
self.use_behavior(WalktoTemplateSM, 'Walk to Template'),
transitions={'finished': 'Manipulation Config', 'failed': 'Walk_Manually', 'aborted': 'Walk_Manually'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit, 'aborted': Autonomy.Inherit},
remapping={'grasp_preference': 'grasp_preference', 'hand_side': 'hand_side', 'template_id': 'template_id'})
# x:525 y:143
OperatableStateMachine.add('Manipulation Config',
self.use_behavior(ManipulationConfigSM, 'Manipulation Config'),
transitions={'finished': 'Head_Look_Down', 'failed': 'Manipulation_Config_Manually'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})
# x:264 y:228
OperatableStateMachine.add('Manipulation_Config_Manually',
OperatorDecisionState(outcomes=["done", "abort"], hint="Make sure the robot is ready to grasp", suggestion=None),
transitions={'done': 'Pickup Object', 'abort': 'failed'},
autonomy={'done': Autonomy.Full, 'abort': Autonomy.Full})
# x:769 y:78
OperatableStateMachine.add('Walk_Manually',
LogState(text="Guide the robot to the template manually.", severity=Logger.REPORT_HINT),
transitions={'done': 'Manipulation Config'},
autonomy={'done': Autonomy.Full})
# x:245 y:485
OperatableStateMachine.add('Back_To_Stand',
_sm_back_to_stand_1,
transitions={'finished': 'failed', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'none': 'none'})
# x:538 y:353
OperatableStateMachine.add('Pickup Object',
self.use_behavior(PickupObjectSM, 'Pickup Object'),
transitions={'finished': 'Head_Look_Straight', 'failed': 'Back_To_Stand'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'hand_side': 'hand_side', 'template_id': 'template_id'})
# x:545 y:257
OperatableStateMachine.add('Head_Look_Down',
TiltHeadState(desired_tilt=TiltHeadState.DOWN_45),
transitions={'done': 'Pickup Object', 'failed': 'failed'},
autonomy={'done': Autonomy.Low, 'failed': Autonomy.Low})
# x:540 y:473
OperatableStateMachine.add('Head_Look_Straight',
TiltHeadState(desired_tilt=TiltHeadState.STRAIGHT),
transitions={'done': 'Step_Back', 'failed': 'failed'},
autonomy={'done': Autonomy.Low, 'failed': Autonomy.Low})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC]
| bsd-3-clause |
dkubiak789/odoo | addons/stock_landed_costs/__openerp__.py | 220 | 1914 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest for the "WMS Landed Costs" module: a plain
# dict literal that the server reads when scanning addons.
{
    'name': 'WMS Landed Costs',
    'version': '1.1',
    'author': 'OpenERP SA',
    'summary': 'Landed Costs',
    'description': """
Landed Costs Management
=======================
This module allows you to easily add extra costs on pickings and decide the split of these costs among their stock moves in order to take them into account in your stock valuation.
""",
    'website': 'https://www.odoo.com/page/warehouse',
    'depends': ['stock_account'],
    'category': 'Warehouse Management',
    'sequence': 16,
    # No demo data is shipped with this module.
    'demo': [
    ],
    # Data files loaded when the module is installed/updated.
    'data': [
        'security/ir.model.access.csv',
        'stock_landed_costs_sequence.xml',
        'product_view.xml',
        'stock_landed_costs_view.xml',
        'stock_landed_costs_data.xml',
    ],
    'test': [
        'test/stock_landed_costs.yml'
    ],
    'installable': True,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/skimage/measure/tests/test_structural_similarity.py | 3 | 6671 | import os
import numpy as np
from skimage import data, data_dir
from skimage.measure import compare_ssim as ssim
from skimage._shared import testing
from skimage._shared._warnings import expected_warnings
from skimage._shared.testing import (assert_equal, assert_almost_equal,
assert_array_almost_equal)
# Module-level fixture: the camera image plus Gaussian noise (sigma=20),
# clipped back into [0, 255] and cast to the original dtype. Seeded so the
# noisy image is reproducible across runs.
np.random.seed(5)
cam = data.camera()
sigma = 20.0
cam_noisy = np.clip(cam + sigma * np.random.randn(*cam.shape), 0, 255)
cam_noisy = cam_noisy.astype(cam.dtype)

# Re-seed so the random arrays built inside the tests below are deterministic.
np.random.seed(1234)
def test_ssim_patch_range():
    # Single-window SSIM: two independent noise images score near zero,
    # while an image compared with itself scores exactly 1.
    side = 51
    img_a = (np.random.rand(side, side) * 255).astype(np.uint8)
    img_b = (np.random.rand(side, side) * 255).astype(np.uint8)

    assert ssim(img_a, img_b, win_size=side) < 0.1
    assert_equal(ssim(img_a, img_a, win_size=side), 1)
def test_ssim_image():
    # Whole-image SSIM with sliding windows: self-similarity is 1,
    # independent noise scores low, and the scalar result matches the
    # mean returned alongside the full SSIM map.
    side = 100
    img_a = (np.random.rand(side, side) * 255).astype(np.uint8)
    img_b = (np.random.rand(side, side) * 255).astype(np.uint8)

    assert_equal(ssim(img_a, img_a, win_size=3), 1)

    score_uniform = ssim(img_a, img_b, win_size=3)
    assert score_uniform < 0.3

    score_gaussian = ssim(img_a, img_b, win_size=11, gaussian_weights=True)
    assert score_gaussian < 0.3

    mean_from_full, ssim_map = ssim(img_a, img_b, full=True)
    assert_equal(ssim_map.shape, img_a.shape)
    assert_equal(mean_from_full, ssim(img_a, img_b))

    # ssim of image with itself should be 1.0
    assert_equal(ssim(img_a, img_a), 1.0)
# NOTE: This test is known to randomly fail on some systems (Mac OS X 10.6)
def test_ssim_grad():
    # gradient=True adds a gradient image to the return value; with
    # full=True the call returns (mssim, gradient, ssim_map).
    side = 30
    img_a = np.random.rand(side, side) * 255
    img_b = np.random.rand(side, side) * 255

    plain_score = ssim(img_a, img_b, data_range=255)
    with_gradient = ssim(img_a, img_b, data_range=255, gradient=True)

    assert plain_score < 0.05
    assert with_gradient[0] < 0.05
    assert np.all(with_gradient[1] < 0.05)

    mssim, grad, s = ssim(img_a, img_b, data_range=255, gradient=True, full=True)
    assert np.all(grad < 0.05)
def test_ssim_dtype():
    """SSIM of independent noise stays low for both float and uint8 inputs.

    Regression note: the uint8 image ``Y`` was previously re-derived from
    ``X`` (``Y = (X * 255).astype(...)``), so the second comparison was not
    between two independent images as intended.
    """
    N = 30
    X = np.random.rand(N, N)
    Y = np.random.rand(N, N)
    S1 = ssim(X, Y)

    X = (X * 255).astype(np.uint8)
    # Fixed typo: scale Y (not X) so the uint8 pair stays independent.
    Y = (Y * 255).astype(np.uint8)
    S2 = ssim(X, Y)

    assert S1 < 0.1
    assert S2 < 0.1
def test_ssim_multichannel():
    # Multichannel SSIM on an image whose three channels are identical
    # copies of one plane must reproduce the single-channel score, and the
    # full/gradient outputs must match the input shape.
    side = 100
    plane_a = (np.random.rand(side, side) * 255).astype(np.uint8)
    plane_b = (np.random.rand(side, side) * 255).astype(np.uint8)
    score_2d = ssim(plane_a, plane_b, win_size=3)

    # replicate across three channels. should get identical value
    rgb_a = np.tile(plane_a[..., np.newaxis], (1, 1, 3))
    rgb_b = np.tile(plane_b[..., np.newaxis], (1, 1, 3))
    score_3d = ssim(rgb_a, rgb_b, multichannel=True, win_size=3)
    assert_almost_equal(score_2d, score_3d)

    # full case should return an image as well
    mean_score, ssim_map = ssim(rgb_a, rgb_b, multichannel=True, full=True)
    assert_equal(ssim_map.shape, rgb_a.shape)

    # gradient case
    mean_score, grad_img = ssim(rgb_a, rgb_b, multichannel=True, gradient=True)
    assert_equal(grad_img.shape, rgb_a.shape)

    # full and gradient case
    mean_score, grad_img, ssim_map = ssim(rgb_a, rgb_b, multichannel=True, full=True, gradient=True)
    assert_equal(grad_img.shape, rgb_a.shape)
    assert_equal(ssim_map.shape, rgb_a.shape)

    # fail if win_size exceeds any non-channel dimension
    with testing.raises(ValueError):
        ssim(rgb_a, rgb_b, win_size=7, multichannel=False)
def test_ssim_nD():
    """Exercise SSIM on 1-D through 4-D random arrays.

    Bug fix: the test arrays were previously always built 5-dimensional
    (``xsize = [N, ] * 5``), leaving the loop variable ``ndim`` unused, so
    the low-dimensional cases promised by the comment were never exercised.
    """
    N = 10
    for ndim in range(1, 5):
        xsize = [N, ] * ndim
        X = (np.random.rand(*xsize) * 255).astype(np.uint8)
        Y = (np.random.rand(*xsize) * 255).astype(np.uint8)

        mssim = ssim(X, Y, win_size=3)
        assert mssim < 0.05
def test_ssim_multichannel_chelsea():
    # On a real RGB image, the multichannel score equals the mean of the
    # per-channel scores, and self-similarity is exactly 1.
    clean = data.chelsea()
    noise_level = 15.0
    noisy = np.clip(clean + noise_level * np.random.randn(*clean.shape), 0, 255)
    noisy = noisy.astype(clean.dtype)

    combined = ssim(clean, noisy, multichannel=True)
    per_channel = [ssim(noisy[..., c], clean[..., c]) for c in range(clean.shape[-1])]
    assert_almost_equal(combined, np.mean(per_channel))

    # ssim of image with itself should be 1.0
    assert_equal(ssim(clean, clean, multichannel=True), 1.0)
def test_gaussian_mssim_vs_IPOL():
    # Reference value produced by the imdiff tool from the IPOL article:
    # http://www.ipol.im/pub/art/2011/g_lmii/
    reference = 0.327309966087341
    measured = ssim(cam, cam_noisy, gaussian_weights=True,
                    use_sample_covariance=False)
    assert_almost_equal(measured, reference, decimal=3)
def test_gaussian_mssim_vs_author_ref():
    """Compare against the original author's Matlab implementation.

    Implementation available at
    https://ece.uwaterloo.ca/~z70wang/research/ssim/

    Matlab test code::

        img1 = imread('camera.png')
        img2 = imread('camera_noisy.png')
        mssim = ssim_index(img1, img2)
    """
    matlab_reference = 0.327314295673357
    measured = ssim(cam, cam_noisy, gaussian_weights=True,
                    use_sample_covariance=False)
    assert_almost_equal(measured, matlab_reference, decimal=3)
def test_gaussian_mssim_and_gradient_vs_Matlab():
    # Reference data generated with N. Avanaki's Matlab implementation:
    # https://ece.uwaterloo.ca/~nnikvand/Coderep/SHINE%20TOOLBOX/SHINEtoolbox/
    # Note: final line of ssim_sens.m was modified to discard image borders
    reference = np.load(os.path.join(data_dir, 'mssim_matlab_output.npz'))
    expected_grad = reference['grad_matlab']
    expected_mssim = float(reference['mssim_matlab'])

    actual_mssim, actual_grad = ssim(cam, cam_noisy, gaussian_weights=True,
                                     gradient=True,
                                     use_sample_covariance=False)
    assert_almost_equal(actual_mssim, expected_mssim, decimal=3)

    # check almost equal aside from object borders
    assert_array_almost_equal(expected_grad[5:-5], actual_grad[5:-5])
def test_mssim_vs_legacy():
    # Guard against regressions relative to the scikit-image 0.11 result
    # with default options on the camera image pair.
    legacy_value = 0.34192589699605191
    current = ssim(cam, cam_noisy)
    assert_almost_equal(current, legacy_value)
def test_mssim_mixed_dtype():
    # Mismatched input dtypes trigger a warning unless data_range is given
    # explicitly, but the score itself must not change.
    baseline = ssim(cam, cam_noisy)
    with expected_warnings(['Inputs have mismatched dtype']):
        mixed = ssim(cam, cam_noisy.astype(np.float32))
    assert_almost_equal(baseline, mixed)

    # no warning when user supplies data_range
    mixed = ssim(cam, cam_noisy.astype(np.float32), data_range=255)
    assert_almost_equal(baseline, mixed)
def test_invalid_input():
    # Mismatched shapes are rejected.
    img_a = np.zeros((9, 9), dtype=np.double)
    img_b = np.zeros((8, 8), dtype=np.double)
    with testing.raises(ValueError):
        ssim(img_a, img_b)

    # A window larger than the image extent is rejected.
    with testing.raises(ValueError):
        ssim(img_a, img_a, win_size=img_a.shape[0] + 1)

    # Negative tuning parameters are rejected.
    for bad_kwargs in ({'K1': -0.1}, {'K2': -0.1}, {'sigma': -1.0}):
        with testing.raises(ValueError):
            ssim(img_a, img_a, **bad_kwargs)
| gpl-3.0 |
kate-v-stepanova/scilifelab | scilifelab/report/delivery_notes.py | 4 | 36997 | """Module delivery_notes - code for generating delivery reports and notes"""
import os
import re
import itertools
import ast
import json
import math
import csv
import yaml
import texttable
import unicodedata
from cStringIO import StringIO
from collections import Counter
from scilifelab.db.statusdb import SampleRunMetricsConnection, ProjectSummaryConnection, FlowcellRunMetricsConnection, calc_avg_qv
from scilifelab.utils.misc import query_ok
from scilifelab.report import sequencing_success
from scilifelab.report.rst import make_sample_rest_notes, make_rest_note
from scilifelab.report.rl import make_note, concatenate_notes, sample_note_paragraphs, sample_note_headers, project_note_paragraphs, project_note_headers, make_sample_table
import scilifelab.log
# Module-level logger for this report module.
LOG = scilifelab.log.minimal_logger(__name__)

# Software versions used in data production. Instrument specific?
software_versions = {
    'baseconversion_version' : 'OLB v1.9',
    'casava_version' : 'CASAVA v1.8.2'
    }
def _parse_instrument_config(cfile):
    """Parse a supplied yaml file with instrument ids and associated metadata.

    :param cfile: path to the yaml config file, or None
    :returns: list of dicts, one per instrument; a single default entry when
              no usable file was supplied
    """
    if cfile is None or not os.path.exists(cfile):
        LOG.warn("No instrument config file supplied, will use default value")
        return [{'instrument_id': 'default', 'instrument_alias': 'NN', 'instrument_version': 'NN'}]
    with open(cfile) as fh:
        # safe_load instead of load: the config holds plain scalars/mappings,
        # and yaml.load can construct arbitrary python objects.
        return yaml.safe_load(fh)
# http://stackoverflow.com/questions/3154460/python-human-readable-large-numbers
def _round_read_count_in_millions(n):
    """Express an absolute read count in millions of reads.

    The number of decimals kept depends on the count's magnitude
    (two decimals for smaller counts, one for billion-scale counts).

    :param n: absolute read count, or None
    :returns: read count in millions; None if input is None, 0 if 0
    """
    LOG.debug("Rounding read count: got {}".format(n))
    if n is None:
        return None
    if n == 0:
        return 0
    decimals = [2, 2, 1]
    magnitude = int(math.floor(math.log10(abs(int(n))) / 3.0))
    idx = min(len(decimals) - 1, max(0, magnitude))
    return round(float(n) / 10 ** 6, decimals[idx])
def _get_ordered_million_reads(sample_name, ordered_million_reads):
    """Look up the ordered amount (million reads) for a sample.

    ``ordered_million_reads`` is either a scalar applied to all samples or a
    mapping from sample name to amount, optionally with a "default" entry.

    :param sample_name: sample name (possibly barcode name)
    :param ordered_million_reads: parsed option passed to application
    :returns: ordered number of reads, or -1 when no value applies
    """
    if not isinstance(ordered_million_reads, dict):
        return ordered_million_reads
    if sample_name in ordered_million_reads:
        return ordered_million_reads[sample_name]
    return ordered_million_reads.get("default", -1)
def _get_phix_error_rate(lane, phix):
    """Look up the phix error rate to use for a given lane.

    ``phix`` is either a scalar applied to all lanes or a mapping from
    integer lane number to error rate.

    :param lane: lane
    :param phix: parsed option passed to application
    :returns: phix error rate, or -1 when no lane-specific value exists
    """
    if not isinstance(phix, dict):
        return phix
    return phix.get(int(lane), -1)
def _get_bc_count(sample_name, bc_count, sample_run):
    """Look up the barcode (read) count for a sample.

    ``bc_count`` is either a scalar applied to all samples or a mapping from
    sample name to count; missing samples fall back to the mapping's
    "default" entry and finally to the sample run document's own count.

    :param sample_name: sample name
    :param bc_count: parsed option passed to application
    :param sample_run: sample run object
    :returns: barcode count, or -1 when nothing is recorded
    """
    if not isinstance(bc_count, dict):
        return bc_count
    if sample_name in bc_count:
        return bc_count[sample_name]
    return bc_count.get("default", sample_run.get("bc_count", -1))
def _assert_flowcell_format(flowcell):
    """Check that a flowcell id matches "[A-Z0-9\\-]+".

    :param flowcell: flowcell id
    :returns: True if the id is well-formed (or None), False otherwise
    """
    if flowcell is None:
        # A missing flowcell id is accepted -- "Can this really be right?!?"
        # per the original author.
        return True
    return re.match("[A-Z0-9\-]+$", flowcell) is not None
def _set_sample_run_list(project_name, flowcell, project_alias, s_con):
    """Collect the sample runs for a project, including any project aliases.

    :param project_name: project name
    :param flowcell: flowcell id
    :param project_alias: project alias argument passed to pm (string
        representation of a list, or falsy for none)
    :param s_con: sample run connection
    :returns: list of sample run documents
    """
    runs = s_con.get_samples(sample_prj=project_name, fc_id=flowcell)
    if not project_alias:
        return runs
    # The alias option arrives as the string repr of a list of names.
    for alias in ast.literal_eval(project_alias):
        alias_runs = s_con.get_samples(sample_prj=alias, fc_id=flowcell)
        if alias_runs:
            runs.extend(alias_runs)
    return runs
def _literal_eval_option(option, default=None):
    """Evaluate an option passed on the command line.

    The option may be a path to a json file (which is loaded), a python
    literal (parsed with ast.literal_eval), or empty.

    :param option: option passed to pm, which could be a file name
    :param default: value to return when the option is empty
    :returns: parsed option
    """
    if not option:
        return default
    if not os.path.exists(option):
        return ast.literal_eval(option)
    with open(option) as fh:
        return json.load(fh)
def _update_sample_output_data(output_data, cutoffs):
    """Append the quality-stats header to the stdout buffer of output_data.

    :param output_data: output data dictionary with a writable 'stdout' entry
    :param cutoffs: dictionary with 'phix_err_cutoff' and 'qv_cutoff'
    :returns: the same output data dictionary with the header appended
    """
    out = output_data["stdout"]
    header_lines = [
        "\nQuality stats\n",
        "************************\n",
        "PhiX error cutoff: > {:3}\n".format(cutoffs['phix_err_cutoff']),
        "QV cutoff : < {:3}\n".format(cutoffs['qv_cutoff']),
        "************************\n\n",
        "{:>18}\t{:>6}\t{:>12}\t{:>12}\t{:>12}\t{:>12}\n".format("Scilifelab ID", "Lane", "PhiXError", "ErrorStatus", "AvgQV", "QVStatus"),
        "{:>18}\t{:>6}\t{:>12}\t{:>12}\t{:>12}\t{:>12}\n".format("=============", "====", "=========", "===========", "=====", "========"),
    ]
    for line in header_lines:
        out.write(line)
    return output_data
def _set_project_sample_dict(project_sample_item, source):
    """Set a project sample dict, mapping a project sample to sample run metrics if present in project summary.

    :param project_sample_item: a project sample item
    :param source: information source for the project ('lims' or other);
        selects which document layout is expected
    :returns: project_sample_d or empty dict
    """
    project_sample_d = {}

    #The structure of the database has changed for projects opened after July 1st
    #2013 (document 10294_01 for more details)
    if source == 'lims':
        LOG.debug("This project has LIMS as source of information")
        if "library_prep" in project_sample_item.keys():
            # Collect the per-prep sample_run_metrics sub-documents.
            sample_run_metrics = {k:v.get("sample_run_metrics", {}) for k,v in \
                                  project_sample_item["library_prep"].iteritems()}
            project_sample_d = {}
            for fc in sample_run_metrics.items():
                fc, metrics = fc
                for k, v in metrics.iteritems():
                    # NOTE: `sample_run_metrics` is deliberately reused here as a
                    # presence flag for the metrics id of this run.
                    sample_run_metrics = v.get('sample_run_metrics_id', '')
                    if sample_run_metrics:
                        project_sample_d[k] = v['sample_run_metrics_id']
                    else:
                        LOG.warn("No sample_run_metrics information for sample '{}'".format(project_sample_item))
        else:
            sample_run_metrics = project_sample_item.get("sample_run_metrics", {})
            project_sample_d = {metrics[0]:metrics[1]['sample_run_metrics_id'] \
                                for metrics in sample_run_metrics.items()}
            if not project_sample_item.get("sample_run_metrics", {}):
                LOG.warn("No sample_run_metrics information for sample '{}'".format(project_sample_item))
    else:
        # Pre-LIMS layout: sample_run_metrics maps run name -> metrics id directly.
        if "library_prep" in project_sample_item.keys():
            project_sample_d = {x:y for d in [v.get("sample_run_metrics", {}) \
                                for k,v in project_sample_item["library_prep"].iteritems()] \
                                for x,y in d.iteritems()}
        else:
            project_sample_d = {x:y for x,y in project_sample_item.get("sample_run_metrics", {}).iteritems()}
            if not project_sample_item.get("sample_run_metrics", {}):
                LOG.warn("No sample_run_metrics information for sample '{}'".format(project_sample_item))
    return project_sample_d
def _exclude_sample_id(exclude_sample_ids, sample_name, barcode_seq):
    """Decide whether a sample (optionally a specific barcode) is excluded.

    :param exclude_sample_ids: dictionary of sample:barcode pairs
    :param sample_name: project sample name
    :param barcode_seq: the barcode sequence
    :returns: True if exclude, False otherwise (None when the sample is not
        mentioned at all, which callers treat as "keep")
    """
    if exclude_sample_ids and sample_name in exclude_sample_ids.keys():
        barcodes = exclude_sample_ids[sample_name]
        if not barcodes:
            # No barcodes listed: the whole sample is excluded.
            LOG.info("excluding sample '{}' from report".format(sample_name))
            return True
        if barcode_seq in barcodes:
            LOG.info("excluding sample '{}' with barcode '{}' from report".format(sample_name, barcode_seq))
            return True
        LOG.info("keeping sample '{}' with barcode '{}' in report".format(sample_name, barcode_seq))
        return False
def sample_status_note(project_name=None, flowcell=None, username=None, password=None, url=None,
                       ordered_million_reads=None, uppnex_id=None, customer_reference=None, bc_count=None,
                       project_alias=[], projectdb="projects", samplesdb="samples", flowcelldb="flowcells",
                       phix=None, is_paired=True, exclude_sample_ids={}, **kw):
    """Make a sample status note. Used keywords:

    :param project_name: project name
    :param flowcell: flowcell id
    :param username: db username
    :param password: db password
    :param url: db url
    :param ordered_million_reads: number of ordered reads in millions
    :param uppnex_id: the uppnex id
    :param customer_reference: customer project name
    :param project_alias: project alias name
    :param phix: phix error rate
    :param is_paired: True if run is paired-end, False for single-end
    :returns: dictionary with 'stdout', 'stderr' and 'debug' StringIO buffers
    """
    # NOTE(review): mutable default arguments (project_alias=[],
    # exclude_sample_ids={}) are shared between calls; exclude_sample_ids is
    # rebound below so that one is benign, but confirm for project_alias.
    # Cutoffs
    cutoffs = {
        "phix_err_cutoff" : 2.0,
        "qv_cutoff" : 30,
        }
    instrument = _parse_instrument_config(os.path.expanduser(kw.get("instrument_config","")))
    instrument_dict = {i['instrument_id']: i for i in instrument}

    # parameters
    parameters = {
        "project_name" : None,
        "start_date" : None,
        "FC_id" : None,
        "scilifelab_name" : None,
        "rounded_read_count" : None,
        "phix_error_rate" : None,
        "avg_quality_score" : None,
        "pct_q30_bases" : None,
        "success" : None,
        "run_mode":None,
        "is_paired":True
        }
    # key mapping from sample_run_metrics to parameter keys
    srm_to_parameter = {"project_name":"sample_prj", "FC_id":"flowcell",
                        "scilifelab_name":"barcode_name", "start_date":"date",
                        "rounded_read_count":"bc_count", "lane": "lane"}

    exclude_sample_ids = _literal_eval_option(exclude_sample_ids, default={})

    LOG.debug("got parameters {}".format(parameters))
    output_data = {'stdout':StringIO(), 'stderr':StringIO(), 'debug':StringIO()}
    if not _assert_flowcell_format(flowcell):
        LOG.warn("Wrong flowcell format {}; skipping. Please use the flowcell id (format \"[A-Z0-9\-]+\")".format(flowcell) )
        return output_data
    output_data = _update_sample_output_data(output_data, cutoffs)

    # Connect and run
    s_con = SampleRunMetricsConnection(dbname=samplesdb, username=username, password=password, url=url)
    fc_con = FlowcellRunMetricsConnection(dbname=flowcelldb, username=username, password=password, url=url)
    p_con = ProjectSummaryConnection(dbname=projectdb, username=username, password=password, url=url)

    # Set up paragraphs
    paragraphs = sample_note_paragraphs()
    headers = sample_note_headers()

    # Get project
    project = p_con.get_entry(project_name)
    source = p_con.get_info_source(project_name)
    if not project:
        LOG.warn("No such project '{}'".format(project_name))
        return output_data

    # Set samples list
    sample_run_list = _set_sample_run_list(project_name, flowcell, project_alias, s_con)
    if len(sample_run_list) == 0:
        LOG.warn("No samples for project '{}', flowcell '{}'. Maybe there are no sample run metrics in statusdb?".format(project_name, flowcell))
        return output_data

    # Set options
    ordered_million_reads = _literal_eval_option(ordered_million_reads)
    bc_count = _literal_eval_option(bc_count)
    phix = _literal_eval_option(phix)

    # Count number of times a sample has been run on a flowcell; if several, make lane-specific reports
    sample_count = Counter([x.get("barcode_name") for x in sample_run_list])

    # Loop samples and collect information
    s_param_out = []
    fcdoc = None
    for s in sample_run_list:
        if _exclude_sample_id(exclude_sample_ids, s.get("barcode_name"), s.get("sequence")):
            continue
        s_param = {}
        LOG.debug("working on sample '{}', sample run metrics name '{}', id '{}'".format(s.get("barcode_name", None), s.get("name", None), s.get("_id", None)))
        s_param.update(parameters)
        s_param.update({key:s[srm_to_parameter[key]] for key in srm_to_parameter.keys()})
        fc = "{}_{}".format(s.get("date"), s.get("flowcell"))
        # Get instrument
        try:
            s_param.update(instrument_dict[fc_con.get_instrument(str(fc))])
        except:
            LOG.warn("Failed to set instrument and software versions for flowcell {} in report due to missing RunInfo -> Instrument field in statusdb. Either rerun 'pm qc update-qc' or search-and-replace 'NN' in the sample report.".format(fc))
            s_param.update(instrument_dict['default'])
        # Get run mode
        # Flowcell document is cached between iterations; re-fetch only when
        # the current sample run comes from a different flowcell.
        if not fcdoc or fcdoc.get("name") != fc:
            fcdoc = fc_con.get_entry(fc)
        runp = fcdoc.get("RunParameters",{})
        s_param["sequencing_platform"] = "MiSeq" if "MCSVersion" in runp else "HiSeq2500"
        s_param["clustering_method"] = "onboard clustering" if runp.get("ClusteringChoice","") == "OnBoardClustering" or s_param["sequencing_platform"] == "MiSeq" else "cBot"
        s_param["sequencing_setup"] = fcdoc.get("run_setup")
        s_param["sequencing_mode"] = runp.get("RunMode","High Output")
        s_param["sequencing_software"] = "RTA {}".format(runp.get("RTAVersion"))
        if s_param["sequencing_platform"] == "MiSeq":
            s_param["sequencing_software"] = "MCS {}/{}".format(runp.get("MCSVersion"),s_param["sequencing_software"])
        else:
            s_param["sequencing_software"] = "{} {}/{}".format(runp.get("ApplicationName"),runp.get("ApplicationVersion"),s_param["sequencing_software"])
        s_param["is_paired"] = fc_con.is_paired_end(str(fc))
        if s_param["is_paired"] is None:
            LOG.warn("Could not determine run setup for flowcell {}. Will assume paired-end.".format(fc))
            s_param["is_paired"] = True
        s_param.update(software_versions)
        s_param["phix_error_rate"] = fc_con.get_phix_error_rate(str(fc), s["lane"])
        # A phix option passed on the command line overrides the database value.
        if phix:
            s_param["phix_error_rate"] = _get_phix_error_rate(s["lane"], phix)
        # Get quality score from demultiplex stats, if that fails
        # (which it shouldn't), fall back on fastqc data.
        (avg_quality_score, pct_q30_bases) = fc_con.get_barcode_lane_statistics(project_name, s.get("barcode_name"), fc, s["lane"])
        s_param['avg_quality_score'] = avg_quality_score if avg_quality_score else calc_avg_qv(s)
        if not s_param['avg_quality_score']:
            LOG.warn("Setting average quality failed for sample {}, id {}".format(s.get("name"), s.get("_id")))
        s_param['pct_q30_bases'] = pct_q30_bases
        if not s_param['pct_q30_bases']:
            LOG.warn("Setting % of >= Q30 Bases (PF) failed for sample {}, id {}".format(s.get("name"), s.get("_id")))
        # Compare phix error and qv to cutoffs
        err_stat = "OK"
        qv_stat = "OK"
        if s_param["phix_error_rate"] > cutoffs["phix_err_cutoff"]:
            err_stat = "HIGH"
        elif s_param["phix_error_rate"] == -1:
            err_stat = "N/A"
        if s_param["avg_quality_score"] < cutoffs["qv_cutoff"]:
            qv_stat = "LOW"
        output_data["stdout"].write("{:>18}\t{:>6}\t{:>12}\t{:>12}\t{:>12}\t{:>12}\n".format(s["barcode_name"], s["lane"], s_param["phix_error_rate"], err_stat, s_param["avg_quality_score"], qv_stat))

        # Update/set remaning sample run parameters, falling back on project defaults if *key* is missing
        s_param['ordered_amount'] = s_param.get('ordered_amount',
                                                p_con.get_ordered_amount(project_name,
                                                                         samples=p_con.get_entry(project_name,'samples')))
        s_param['customer_reference'] = s_param.get('customer_reference', project.get('customer_reference'))
        s_param['uppnex_project_id'] = s_param.get('uppnex_project_id', project.get('uppnex_id'))

        # Override database settings if options passed at command line
        if ordered_million_reads:
            s_param["ordered_amount"] = _get_ordered_million_reads(s["barcode_name"], ordered_million_reads)
        if bc_count:
            s_param["rounded_read_count"] = _round_read_count_in_millions(_get_bc_count(s["barcode_name"], bc_count, s))
        else:
            s_param["rounded_read_count"] = _round_read_count_in_millions(s_param["rounded_read_count"])
        if uppnex_id:
            s_param["uppnex_project_id"] = uppnex_id
        if customer_reference:
            s_param["customer_reference"] = customer_reference

        # Get the project sample name corresponding to the sample run
        project_sample = p_con.get_project_sample(project_name, s.get("project_sample_name", None))
        if project_sample:
            LOG.debug("project sample run metrics mapping found: '{}' : '{}'".format(s["name"], project_sample["sample_name"]))
            project_sample_item = project_sample['project_sample']
            # Set project_sample_d: a dictionary mapping from sample run metrics name to sample run metrics database id
            project_sample_d = _set_project_sample_dict(project_sample_item, source)
            if not project_sample_d:
                LOG.warn("No sample_run_metrics information for sample '{}', barcode name '{}', id '{}'\n\tProject summary information {}".format(s["name"], s["barcode_name"], s["_id"], project_sample))
            # Check if sample run metrics name present in project database: if so, verify that database ids are consistent
            if s["name"] not in project_sample_d.keys():
                LOG.warn("no such sample run metrics '{}' in project sample run metrics dictionary".format(s["name"]) )
            else:
                if s["_id"] == project_sample_d[s["name"]]:
                    LOG.debug("project sample run metrics mapping found: '{}' : '{}'".format(s["name"], project_sample_d[s["name"]]))
                else:
                    LOG.warn("inconsistent mapping for '{}': '{}' != '{}' (project summary id)".format(s["name"], s["_id"], project_sample_d[s["name"]]))
            s_param['customer_name'] = project_sample_item.get("customer_name", None)
            # Always normalize submitted id, since module textttable does not support unicode
            if type(s_param['customer_name']) is unicode:
                s_param['customer_name'] = unicodedata.normalize('NFKD', s_param['customer_name']).encode('ascii', 'ignore')
        # No project sample found. Manual upload to database necessary.
        else:
            s_param['customer_name'] = None
            LOG.warn("No project sample name found for sample run name '{}'".format(s["barcode_name"]))
            LOG.info("Please run 'pm qc upload-qc FLOWCELL_ID --extensive-matching' to update project sample names ")
            LOG.info("or 'pm qc update --sample_prj PROJECT_NAME --names BARCODE_TO_SAMPLE_MAP to update project sample names.")
            LOG.info("Please refer to the pm documentation for examples.")
            query_ok(force=kw.get("force", False))

        # Finally assess sequencing success, update parameters and set outputs
        s_param['success'] = sequencing_success(s_param, cutoffs)
        s_param.update({k:"N/A" for k in s_param.keys() if s_param[k] is None or s_param[k] == "" or s_param[k] == -1.0})
        # Samples run more than once get lane-specific report file names.
        if sample_count[s.get("barcode_name")] > 1:
            outfile = "{}_{}_{}_{}.pdf".format(s["barcode_name"], s["date"], s["flowcell"], s["lane"])
        else:
            outfile = "{}_{}_{}.pdf".format(s["barcode_name"], s["date"], s["flowcell"])
        s_param["outfile"] = outfile
        s_param_out.append(s_param)

    # Write final output to reportlab and rst files
    # NOTE: `s` below is the last sample run from the loop above; its date and
    # flowcell are used to name the summary files.
    output_data["debug"].write(json.dumps({'s_param': s_param_out, 'sample_runs':{s["name"]:s["barcode_name"] for s in sample_run_list}}))
    notes = [make_note(headers=headers, paragraphs=paragraphs, **sp) for sp in s_param_out]
    rest_notes = make_sample_rest_notes("{}_{}_{}_sample_summary.rst".format(project_name, s.get("date", None), s.get("flowcell", None)), s_param_out)
    concatenate_notes(notes, "{}_{}_{}_sample_summary.pdf".format(project_name, s.get("date", None), s.get("flowcell", None)))
    return output_data
def _set_sample_table_values(sample_name, project_sample, barcode_seq, ordered_million_reads, param):
    """Set the values for a sample that is to appear in the final table.

    :param sample_name: string identifier of sample
    :param project_sample: project sample dictionary from project summary database
    :param barcode_seq: barcode sequence
    :param ordered_million_reads: the number of ordered reads; when truthy this
        also updates param["ordered_amount"] in place as a side effect
    :param param: project parameters; "ordered_amount" is read (and possibly
        rewritten, see above) from this dict

    :returns: vals, a dictionary of table values
    """
    vals = {}
    vals['ScilifeID'] = project_sample.get("scilife_name", None)
    vals['SubmittedID'] = project_sample.get("customer_name", None)
    details = project_sample.get("details", None)
    try:
        vals['MSequenced'] = details["total_reads_(m)"]
    except (TypeError, KeyError):
        # KeyError: no such key, TypeError: details is None
        vals['MSequenced'] = project_sample.get("m_reads_sequenced")
    if ordered_million_reads:
        param["ordered_amount"] = _get_ordered_million_reads(sample_name, ordered_million_reads)
    vals['MOrdered'] = param["ordered_amount"]
    vals['BarcodeSeq'] = barcode_seq
    # Always normalize submitted id, since module texttable does not support unicode.
    # isinstance is preferred over an exact type() comparison so that unicode
    # subclasses are normalized as well.
    if isinstance(vals['SubmittedID'], unicode):
        vals['SubmittedID'] = unicodedata.normalize('NFKD', vals['SubmittedID']).encode('ascii', 'ignore')
    # Replace empty/missing values with a printable placeholder
    vals.update({k: "N/A" for k in vals.keys() if vals[k] is None or vals[k] == ""})
    return vals
def data_delivery_note(**kw):
    """Create an easily parseable information file with information about the data delivery.

    Keywords of interest: ``project_name`` and ``flowcell``; remaining keywords
    are forwarded to the database connection objects.

    :returns: dict of StringIO buffers keyed 'stdout', 'stderr' and 'debug'.
        The delivery table is also written to
        ``<project>[_<flowcell>]_data_delivery.csv`` and echoed to stdout as a
        Texttable-formatted table.
    """
    output_data = {'stdout':StringIO(), 'stderr':StringIO(), 'debug':StringIO()}
    project_name = kw.get('project_name',None)
    flowcell = kw.get('flowcell',None)
    LOG.debug("Generating data delivery note for project {}{}.".format(project_name,' and flowcell {}'.format(flowcell if flowcell else '')))
    # Get a connection to the project and sample databases
    p_con = ProjectSummaryConnection(**kw)
    assert p_con, "Could not connect to project database"
    s_con = SampleRunMetricsConnection(**kw)
    assert s_con, "Could not connect to sample database"
    # Get the entry for the project and samples from the database
    LOG.debug("Fetching samples from sample database")
    samples = s_con.get_samples(sample_prj=project_name, fc_id=flowcell)
    LOG.debug("Got {} samples from database".format(len(samples)))
    # Get the customer sample names from the project database
    LOG.debug("Fetching samples from project database")
    project_samples = p_con.get_entry(project_name, "samples")
    customer_names = {sample_name:sample.get('customer_name','N/A') for sample_name, sample in project_samples.items()}
    data = [['SciLifeLab ID','Submitted ID','Flowcell','Lane','Barcode','Read','Path','MD5','Size (bytes)','Timestamp']]
    for sample in samples:
        sname = sample.get('project_sample_name','N/A')
        cname = customer_names.get(sname,'N/A')
        fc = sample.get('flowcell','N/A')
        lane = sample.get('lane','N/A')
        barcode = sample.get('sequence','N/A')
        # Samples without delivery information still get a (mostly empty) row
        # so that every sample is accounted for in the report.
        if 'raw_data_delivery' not in sample:
            data.append([sname,cname,'','','','','','','',''])
            continue
        delivery = sample['raw_data_delivery']
        tstamp = delivery.get('timestamp','N/A')
        # One row per delivered file. Loop variable renamed from 'file' to
        # avoid shadowing the builtin.
        for read, fdata in delivery.get('files',{}).items():
            data.append([sname,
                         cname,
                         fc,
                         lane,
                         barcode,
                         read,
                         fdata.get('path','N/A'),
                         fdata.get('md5','N/A'),
                         fdata.get('size_in_bytes','N/A'),
                         tstamp,])
    # Write the data to a csv file
    outfile = "{}{}_data_delivery.csv".format(project_name,'_{}'.format(flowcell) if flowcell else '')
    LOG.debug("Writing delivery data to {}".format(outfile))
    with open(outfile,"w") as outh:
        csvw = csv.writer(outh)
        for row in data:
            csvw.writerow(row)
    # Write Texttable formatted output to stdout
    tt = texttable.Texttable(180)
    tt.add_rows(data)
    output_data['stdout'].write(tt.draw())
    return output_data
def project_status_note(project_name=None, username=None, password=None, url=None,
                        use_ps_map=True, use_bc_map=False, check_consistency=False,
                        ordered_million_reads=None, uppnex_id=None, customer_reference=None,
                        exclude_sample_ids={}, project_alias=None, sample_aliases={},
                        projectdb="projects", samplesdb="samples", flowcelldb="flowcells",
                        include_all_samples=False, flat_table=False, **kw):
    """Make a project status note. Used keywords:

    :param project_name: project name
    :param user: db user name
    :param password: db password
    :param url: db url
    :param use_ps_map: use project summary mapping
    :param use_bc_map: use project to barcode name mapping
    :param check_consistency: check consistency between mappings
    :param ordered_million_reads: number of ordered reads in millions
    :param uppnex_id: the uppnex id
    :param customer_reference: customer project name
    :param exclude_sample_ids: exclude some sample ids from project note
    :param project_alias: project alias name
    :param sample_aliases: sample alias names
    :param projectdb: project db name
    :param samplesdb: samples db name
    :param flowcelldb: flowcells db name
    :param include_all_samples: include all samples in report
    :param flat_table: Just create a simple tab-separated version of the table instead of the fancy pdf

    :returns: output_data, a dict of StringIO buffers keyed 'stdout', 'stderr'
        and 'debug'; report files are written to the current directory as a
        side effect
    """
    # parameters
    # NOTE: this dict is passed to _project_status_note_table, which fills in
    # further report parameters in place and returns it as 'param' below.
    parameters = {
        "project_name" : project_name,
        "finished" : "",
    }
    output_data, sample_table, param = _project_status_note_table(project_name, username, password, url,
                                                                  use_ps_map, use_bc_map, check_consistency,
                                                                  ordered_million_reads, uppnex_id,
                                                                  customer_reference, exclude_sample_ids,
                                                                  project_alias, sample_aliases, projectdb,
                                                                  samplesdb, flowcelldb, include_all_samples,
                                                                  parameters, **kw)
    if not flat_table:
        # Set report paragraphs
        paragraphs = project_note_paragraphs()
        headers = project_note_headers()
        #Hack: removes Comments paragraph if it is empty
        if not param["finished"]:
            paragraphs.pop("Comments",None)
        paragraphs["Samples"]["tpl"] = make_sample_table(sample_table)
        # Render both a pdf and a reST version of the project summary
        make_note("{}_project_summary.pdf".format(project_name), headers, paragraphs, **param)
        make_rest_note("{}_project_summary.rst".format(project_name), sample_table=sample_table, report="project_report", **param)
    else:
        # Write tab-separated output
        # Prepend a ProjectID column to the header row; data rows get theirs
        # inside the loop below. Column indices are resolved from the already
        # extended header, so the order of these two steps matters.
        sample_table[0].insert(0,'ProjectID')
        table_cols = [sample_table[0].index(col) for col in ['ProjectID', 'ScilifeID', 'SubmittedID', 'BarcodeSeq', 'MSequenced']]
        outfile = "{}_project_summary.csv".format(project_name)
        with open(outfile,"w") as outh:
            csvw = csv.writer(outh)
            for i,sample in enumerate(sample_table):
                # i == 0 is the header row, which already carries ProjectID
                if i > 0:
                    sample.insert(0,project_name)
                data = [str(sample[col]) for col in table_cols]
                csvw.writerow(data)
                output_data['stdout'].write("{}\n".format("\t".join(data)))
    # Replace empty/missing parameter values with a printable placeholder
    param.update({k:"N/A" for k in param.keys() if param[k] is None or param[k] == ""})
    output_data["debug"].write(json.dumps({'param':param, 'table':sample_table}))
    return output_data
def _project_status_note_table(project_name=None, username=None, password=None, url=None,
                               use_ps_map=True, use_bc_map=False, check_consistency=False,
                               ordered_million_reads=None, uppnex_id=None, customer_reference=None,
                               exclude_sample_ids=None, project_alias=None, sample_aliases=None,
                               projectdb="projects", samplesdb="samples", flowcelldb="flowcells",
                               include_all_samples=False, param=None, **kw):
    """Collect and assemble the data for a project status note.

    :param project_name: project name
    :param username: db user name
    :param password: db password
    :param url: db url
    :param use_ps_map: use project summary mapping
    :param use_bc_map: use project to barcode name mapping
    :param check_consistency: check consistency between mappings
    :param ordered_million_reads: number of ordered reads in millions
    :param uppnex_id: the uppnex id
    :param customer_reference: customer project name
    :param exclude_sample_ids: exclude some sample ids from project note
    :param project_alias: project alias name
    :param sample_aliases: sample alias names
    :param projectdb: project db name
    :param samplesdb: samples db name
    :param flowcelldb: flowcells db name
    :param include_all_samples: include all samples in report
    :param param: dict of report parameters; updated in place with values
        gathered from the project database

    :returns: (output_data, sample_table, param) where output_data is a dict of
        StringIO buffers, sample_table is a list of rows (header row first) and
        param is the updated parameter dict. Returns None when the project is
        not found.
    """
    # Fix for the mutable-default-argument pitfall: the previous defaults ({})
    # were shared between calls, and 'param' in particular is mutated below, so
    # stale values could leak from one invocation into the next.
    if exclude_sample_ids is None:
        exclude_sample_ids = {}
    if sample_aliases is None:
        sample_aliases = {}
    if param is None:
        param = {}
    # mapping project_summary to parameter keys
    ps_to_parameter = {"scilife_name":"scilife_name", "customer_name":"customer_name", "project_name":"project_name"}
    # mapping project sample to table
    table_keys = ['ScilifeID', 'SubmittedID', 'BarcodeSeq', 'MSequenced', 'MOrdered']
    output_data = {'stdout':StringIO(), 'stderr':StringIO(), 'debug':StringIO()}
    # Connect and run. fc_con is unused below but kept: constructing the
    # connection may validate flowcell db availability -- TODO confirm.
    s_con = SampleRunMetricsConnection(dbname=samplesdb, username=username, password=password, url=url)
    fc_con = FlowcellRunMetricsConnection(dbname=flowcelldb, username=username, password=password, url=url)
    p_con = ProjectSummaryConnection(dbname=projectdb, username=username, password=password, url=url)
    #Get the information source for this project
    source = p_con.get_info_source(project_name)
    # Get project summary from project database
    sample_aliases = _literal_eval_option(sample_aliases, default={})
    prj_summary = p_con.get_entry(project_name)
    if not prj_summary:
        LOG.warn("No such project '{}'".format(project_name))
        # NOTE(review): this early return yields None while the normal path
        # returns a 3-tuple; the caller unpacks the result, so this raises a
        # TypeError upstream -- confirm whether that is intended.
        return
    LOG.debug("Working on project '{}'.".format(project_name))
    # Determine if project is finished by getting all samples sequenced date
    try:
        all_samples_sequenced = prj_summary['project_summary']['all_samples_sequenced']
    except (TypeError,KeyError):
        all_samples_sequenced = False
    # Get sample run list and loop samples to make mapping sample -> {sampleruns}
    sample_run_list = _set_sample_run_list(project_name, flowcell=None, project_alias=project_alias, s_con=s_con)
    samples = {}
    flowcells_run = []
    for s in sample_run_list:
        prj_sample = p_con.get_project_sample(project_name, s.get("project_sample_name", None))
        if prj_sample:
            sample_name = prj_sample['project_sample'].get("scilife_name", None)
            s_d = {s["name"] : {'sample':sample_name, 'id':s["_id"]}}
            samples.update(s_d)
        else:
            # No project sample mapping: fall back to the alias table, and as a
            # last resort use the sample run's own name.
            if s["barcode_name"] in sample_aliases:
                s_d = {sample_aliases[s["barcode_name"]] : {'sample':sample_aliases[s["barcode_name"]], 'id':s["_id"]}}
                samples.update(s_d)
            else:
                s_d = {s["name"]:{'sample':s["name"], 'id':s["_id"], 'barcode_name':s["barcode_name"]}}
                LOG.warn("No mapping found for sample run:\n '{}'".format(s_d))
        # collect flowcell that have been sequenced for this project
        fc_id = "{}_{}".format(s.get('date'),s.get('flowcell'))
        if fc_id not in flowcells_run:
            flowcells_run.append(fc_id)
    # reformat list of flowcell as one string to be put in report
    param["flowcells_run"] = ", ".join(flowcells_run)
    # Convert to mapping from desired sample name to list of aliases
    # Less important for the moment; one solution is to update the
    # Google docs summary table to use the P names
    sample_dict = prj_summary['samples']
    param.update({key:prj_summary.get(ps_to_parameter[key], None) for key in ps_to_parameter.keys()})
    param["ordered_amount"] = param.get("ordered_amount", p_con.get_ordered_amount(project_name, samples=sample_dict))
    if not param.get('customer_reference') :
        try:
            param['customer_reference'] = prj_summary['details']['customer_project_reference']
        except (TypeError,KeyError):
            param['customer_reference'] = prj_summary.get('customer_reference')
    param['uppnex_project_id'] = param.get('uppnex_project_id', prj_summary.get('uppnex_id'))
    # Override database values if options passed at command line
    if uppnex_id:
        param["uppnex_project_id"] = uppnex_id
    if customer_reference:
        param["customer_reference"] = customer_reference
    # Process options
    ordered_million_reads = _literal_eval_option(ordered_million_reads)
    exclude_sample_ids = _literal_eval_option(exclude_sample_ids, default={})
    ## Start collecting the data
    sample_table = []
    samples_excluded = []
    last_library_preps = p_con.get_latest_library_prep(project_name)
    last_library_preps_srm = [x for l in last_library_preps.values() for x in l]
    LOG.debug("Looping through sample map that maps project sample names to sample run metrics ids")
    for k,v in samples.items():
        LOG.debug("project sample '{}' maps to '{}'".format(k, v))
        if not include_all_samples:
            # Keep only sample runs belonging to the latest library prep;
            # samples with no prep information are kept as well.
            if v['sample'] not in last_library_preps.keys():
                LOG.info("No library prep information for sample {}; keeping in report".format(v['sample']))
            else:
                if k not in last_library_preps_srm:
                    LOG.info("Sample run {} ('{}') is not latest library prep ({}) for project sample {}: excluding from report".format(k, v["id"], ",".join(list(set(last_library_preps[v['sample']].values()))), v['sample']))
                    continue
        else:
            pass
        if re.search("Unexpected", k):
            continue
        barcode_seq = s_con.get_entry(k, "sequence")
        # Exclude sample id?
        if _exclude_sample_id(exclude_sample_ids, v['sample'], barcode_seq):
            samples_excluded.append(v['sample'])
            continue
        # Get the project sample name from the sample run and set table values
        project_sample = sample_dict[v['sample']]
        vals = _set_sample_table_values(v['sample'], project_sample, barcode_seq, ordered_million_reads, param)
        sample_table.append([vals[k] for k in table_keys])
    # Loop through samples in sample_dict for which there is no sample run information
    samples_in_table_or_excluded = list(set([x[0] for x in sample_table])) + samples_excluded
    samples_not_in_table = list(set(sample_dict.keys()) - set(samples_in_table_or_excluded))
    for sample in samples_not_in_table:
        if re.search("Unexpected", sample):
            continue
        project_sample = sample_dict[sample]
        # Set project_sample_d: a dictionary mapping from sample run metrics name to sample run metrics database id
        project_sample_d = _set_project_sample_dict(project_sample, source)
        if project_sample_d:
            for k,v in project_sample_d.iteritems():
                barcode_seq = s_con.get_entry(k, "sequence")
                vals = _set_sample_table_values(sample, project_sample, barcode_seq, ordered_million_reads, param)
                sample_table.append([vals[k] for k in table_keys])
        else:
            barcode_seq = None
            vals = _set_sample_table_values(sample, project_sample, barcode_seq, ordered_million_reads, param)
            sample_table.append([vals[k] for k in table_keys])
    if all_samples_sequenced: param["finished"] = 'All samples for this project have been sequenced.'
    # Sort rows, collapse consecutive duplicates, then prepend the header row
    sample_table.sort()
    sample_table = list(sample_table for sample_table,_ in itertools.groupby(sample_table))
    sample_table.insert(0, ['ScilifeID', 'SubmittedID', 'BarcodeSeq', 'MSequenced', 'MOrdered'])
    return output_data, sample_table, param
| mit |
Andrey-Tkachev/Creto | node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/simple_copy.py | 1869 | 1247 | # Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A clone of the default copy.deepcopy that doesn't handle cyclic
structures or complex types except for dicts and lists. This is
because gyp copies so large structure that small copy overhead ends up
taking seconds in a project the size of Chromium."""
class Error(Exception):
    """Raised when deepcopy encounters a type it does not support."""
__all__ = ["Error", "deepcopy"]
def deepcopy(x):
    """Deep copy operation on gyp objects such as strings, ints, dicts
    and lists. More than twice as fast as copy.deepcopy but much less
    generic.

    :raises Error: when the type of x has no registered copier.
    """
    try:
        return _deepcopy_dispatch[type(x)](x)
    except KeyError:
        # Bug fix: the original applied % only to the second string literal
        # (via '+'), which contains no %s and therefore raised
        # "TypeError: not all arguments converted" instead of the intended
        # Error. Parenthesizing formats the full concatenated message.
        raise Error(('Unsupported type %s for deepcopy. Use copy.deepcopy '
                     'or expand simple_copy support.') % type(x))
# Dispatch table mapping a concrete type to its copier function; 'd' is a
# short-lived alias used only while the table is being populated.
_deepcopy_dispatch = d = {}


def _deepcopy_atomic(x):
    """Return an immutable value unchanged -- no copy is needed."""
    return x


# Register every atomic (immutable) type with the pass-through copier.
for atomic_type in (type(None), int, long, float,
                    bool, str, unicode, type):
    d[atomic_type] = _deepcopy_atomic
def _deepcopy_list(x):
    """Build a new list by deep-copying every element of x."""
    return [deepcopy(element) for element in x]


d[list] = _deepcopy_list
def _deepcopy_dict(x):
    """Build a new dict, deep-copying every key and value of x."""
    result = {}
    for key, value in x.iteritems():
        result[deepcopy(key)] = deepcopy(value)
    return result


d[dict] = _deepcopy_dict

# Drop the construction-time alias; the table stays reachable as
# _deepcopy_dispatch.
del d
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.