| repo_name (stringlengths 5 to 100) | ref (stringlengths 12 to 67) | path (stringlengths 4 to 244) | copies (stringlengths 1 to 8) | content (stringlengths 0 to 1.05M, ⌀ = null) |
|---|---|---|---|---|
gibiansky/tensorflow
|
refs/heads/master
|
tensorflow/contrib/learn/python/learn/dataframe/transforms/in_memory_source.py
|
82
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sources for numpy arrays and pandas DataFrames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions
class BaseInMemorySource(transform.TensorFlowTransform):
"""Abstract parent class for NumpySource and PandasSource."""
def __init__(self,
data,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="in_memory_data"):
super(BaseInMemorySource, self).__init__()
self._data = data
self._num_threads = 1 if num_threads is None else num_threads
self._batch_size = (32 if batch_size is None else batch_size)
self._enqueue_size = max(1, int(self._batch_size / self._num_threads)
) if enqueue_size is None else enqueue_size
self._queue_capacity = (self._batch_size * 10 if queue_capacity is None else
queue_capacity)
self._shuffle = shuffle
self._min_after_dequeue = (batch_size if min_after_dequeue is None else
min_after_dequeue)
self._seed = seed
self._data_name = data_name
@transform.parameter
def data(self):
return self._data
@transform.parameter
def num_threads(self):
return self._num_threads
@transform.parameter
def enqueue_size(self):
return self._enqueue_size
@transform.parameter
def batch_size(self):
return self._batch_size
@transform.parameter
def queue_capacity(self):
return self._queue_capacity
@transform.parameter
def shuffle(self):
return self._shuffle
@transform.parameter
def min_after_dequeue(self):
return self._min_after_dequeue
@transform.parameter
def seed(self):
return self._seed
@transform.parameter
def data_name(self):
return self._data_name
@property
def input_valency(self):
return 0
def _apply_transform(self, transform_input, **kwargs):
queue = feeding_functions.enqueue_data(self.data,
self.queue_capacity,
self.shuffle,
self.min_after_dequeue,
num_threads=self.num_threads,
seed=self.seed,
name=self.data_name,
enqueue_size=self.enqueue_size,
num_epochs=kwargs.get("num_epochs"))
dequeued = queue.dequeue_many(self.batch_size)
# TODO(jamieas): dequeue and dequeue_many will soon return a list regardless
# of the number of enqueued tensors. Remove the following once that change
# is in place.
if not isinstance(dequeued, (tuple, list)):
dequeued = (dequeued,)
# pylint: disable=not-callable
return self.return_type(*dequeued)
class NumpySource(BaseInMemorySource):
"""A zero-input Transform that produces a single column from a numpy array."""
@property
def name(self):
return "NumpySource"
@property
def _output_names(self):
return ("index", "value")
class OrderedDictNumpySource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a dict of numpy arrays."""
def __init__(self,
ordered_dict_of_arrays,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in ordered_dict_of_arrays.keys():
raise ValueError("Column name `index` is reserved.")
super(OrderedDictNumpySource, self).__init__(ordered_dict_of_arrays,
num_threads, enqueue_size,
batch_size, queue_capacity,
shuffle, min_after_dequeue,
seed, data_name)
@property
def name(self):
return "OrderedDictNumpySource"
@property
def _output_names(self):
return tuple(["index"] + list(self._data.keys()))
class PandasSource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a DataFrame."""
def __init__(self,
dataframe,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in dataframe.columns:
raise ValueError("Column name `index` is reserved.")
super(PandasSource, self).__init__(dataframe, num_threads, enqueue_size,
batch_size, queue_capacity, shuffle,
min_after_dequeue, seed, data_name)
@property
def name(self):
return "PandasSource"
@property
def _output_names(self):
return tuple(["index"] + self._data.columns.tolist())
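# Usage sketch (hypothetical data, not part of the original module). With
# the defaults above, batch_size falls back to 32, enqueue_size to
# max(1, batch_size // num_threads) and queue_capacity to 10 * batch_size:
#
#   import numpy as np
#   source = NumpySource(np.arange(100), batch_size=16, num_threads=2)
#   # enqueue_size defaults to max(1, 16 // 2) == 8; queue_capacity to 160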
|
ubgarbage/gae-blog
|
refs/heads/master
|
django/contrib/staticfiles/views.py
|
154
|
"""
Views and functions for serving static files. These are only to be used during
development, and SHOULD NOT be used in a production setting.
"""
import os
import posixpath
import urllib
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import Http404
from django.views import static
from django.contrib.staticfiles import finders
def serve(request, path, document_root=None, insecure=False, **kwargs):
"""
Serve static files below a given point in the directory structure or
from locations inferred from the staticfiles finders.
To use, put a URL pattern such as::
(r'^(?P<path>.*)$', 'django.contrib.staticfiles.views.serve')
in your URLconf.
It uses the django.views.static view to serve the found files.
"""
if not settings.DEBUG and not insecure:
raise ImproperlyConfigured("The staticfiles view can only be used in "
"debug mode or if the --insecure "
"option of 'runserver' is used")
normalized_path = posixpath.normpath(urllib.unquote(path)).lstrip('/')
absolute_path = finders.find(normalized_path)
if not absolute_path:
raise Http404("'%s' could not be found" % path)
document_root, path = os.path.split(absolute_path)
return static.serve(request, path, document_root=document_root, **kwargs)
|
sigma-random/asuswrt-merlin
|
refs/heads/master
|
release/src/router/samba36/source4/scripting/python/samba/netcmd/__init__.py
|
19
|
#!/usr/bin/env python
# Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2009
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import optparse, samba
from samba import getopt as options
from ldb import LdbError
import sys, traceback
class Option(optparse.Option):
pass
class Command(object):
"""A net command."""
def _get_description(self):
return self.__doc__.splitlines()[0].rstrip("\n")
def _get_name(self):
name = self.__class__.__name__
if name.startswith("cmd_"):
return name[4:]
return name
name = property(_get_name)
def usage(self, *args):
parser, _ = self._create_parser()
parser.print_usage()
description = property(_get_description)
def _get_synopsis(self):
ret = self.name
if self.takes_args:
ret += " " + " ".join([x.upper() for x in self.takes_args])
return ret
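# Illustrative example (not in the original file): a subclass named
# cmd_setpassword with takes_args=["username"] would get the synopsis
# "setpassword USERNAME".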
def show_command_error(self, e):
'''display a command error'''
if isinstance(e, CommandError):
(etype, evalue, etraceback) = e.exception_info
inner_exception = e.inner_exception
message = e.message
force_traceback = False
else:
(etype, evalue, etraceback) = sys.exc_info()
inner_exception = e
message = "uncaught exception"
force_traceback = True
if isinstance(inner_exception, LdbError):
(ldb_ecode, ldb_emsg) = inner_exception
print >>sys.stderr, "ERROR(ldb): %s - %s" % (message, ldb_emsg)
elif isinstance(inner_exception, AssertionError):
print >>sys.stderr, "ERROR(assert): %s" % message
force_traceback = True
elif isinstance(inner_exception, RuntimeError):
print >>sys.stderr, "ERROR(runtime): %s - %s" % (message, evalue)
elif type(inner_exception) is Exception:
print >>sys.stderr, "ERROR(exception): %s - %s" % (message, evalue)
force_traceback = True
elif inner_exception is None:
print >>sys.stderr, "ERROR: %s" % (message)
else:
print >>sys.stderr, "ERROR(%s): %s - %s" % (str(etype), message, evalue)
force_traceback = True
if force_traceback or samba.get_debug_level() >= 3:
traceback.print_tb(etraceback)
synopsis = property(_get_synopsis)
outf = sys.stdout
takes_args = []
takes_options = []
takes_optiongroups = {}
def _create_parser(self):
parser = optparse.OptionParser(self.synopsis)
parser.prog = "net"
parser.add_options(self.takes_options)
optiongroups = {}
for name, optiongroup in self.takes_optiongroups.iteritems():
optiongroups[name] = optiongroup(parser)
parser.add_option_group(optiongroups[name])
return parser, optiongroups
def message(self, text):
print text
def _run(self, *argv):
parser, optiongroups = self._create_parser()
opts, args = parser.parse_args(list(argv))
# Filter out options from option groups
args = args[1:]
kwargs = dict(opts.__dict__)
for option_group in parser.option_groups:
for option in option_group.option_list:
if option.dest is not None:
del kwargs[option.dest]
kwargs.update(optiongroups)
min_args = 0
max_args = 0
for i, arg in enumerate(self.takes_args):
if arg[-1] not in ("?", "*"):
min_args += 1
max_args += 1
if arg[-1] == "*":
max_args = -1
if len(args) < min_args or (max_args != -1 and len(args) > max_args):
self.usage(*args)
return -1
try:
return self.run(*args, **kwargs)
except Exception, e:
self.show_command_error(e)
return -1
def run(self):
"""Run the command. This should be overriden by all subclasses."""
raise NotImplementedError(self.run)
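# Sketch of the argument-count rules in _run above (illustrative values):
# a takes_args entry ending in "?" is optional and one ending in "*" is
# variadic, so ["name", "value?"] gives min_args=1, max_args=2 while
# ["name", "rest*"] gives min_args=1, max_args=-1 (unbounded).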
class SuperCommand(Command):
"""A command with subcommands."""
subcommands = {}
def _run(self, myname, subcommand=None, *args):
if subcommand in self.subcommands:
return self.subcommands[subcommand]._run(subcommand, *args)
print "Available subcommands:"
for cmd in self.subcommands:
print "\t%-20s - %s" % (cmd, self.subcommands[cmd].description)
if subcommand in [None, 'help', '-h', '--help' ]:
return 0
raise CommandError("No such subcommand '%s'" % subcommand)
def usage(self, myname, subcommand=None, *args):
if subcommand is None or not subcommand in self.subcommands:
print "Usage: %s (%s) [options]" % (myname,
" | ".join(self.subcommands.keys()))
else:
return self.subcommands[subcommand].usage(*args)
class CommandError(Exception):
'''an exception class for netcmd errors'''
def __init__(self, message, inner_exception=None):
self.message = message
self.inner_exception = inner_exception
self.exception_info = sys.exc_info()
commands = {}
from samba.netcmd.pwsettings import cmd_pwsettings
commands["pwsettings"] = cmd_pwsettings()
from samba.netcmd.domainlevel import cmd_domainlevel
commands["domainlevel"] = cmd_domainlevel()
from samba.netcmd.setpassword import cmd_setpassword
commands["setpassword"] = cmd_setpassword()
from samba.netcmd.setexpiry import cmd_setexpiry
commands["setexpiry"] = cmd_setexpiry()
from samba.netcmd.enableaccount import cmd_enableaccount
commands["enableaccount"] = cmd_enableaccount()
from samba.netcmd.newuser import cmd_newuser
commands["newuser"] = cmd_newuser()
from samba.netcmd.netacl import cmd_acl
commands["acl"] = cmd_acl()
from samba.netcmd.fsmo import cmd_fsmo
commands["fsmo"] = cmd_fsmo()
from samba.netcmd.export import cmd_export
commands["export"] = cmd_export()
from samba.netcmd.time import cmd_time
commands["time"] = cmd_time()
from samba.netcmd.user import cmd_user
commands["user"] = cmd_user()
from samba.netcmd.vampire import cmd_vampire
commands["vampire"] = cmd_vampire()
from samba.netcmd.machinepw import cmd_machinepw
commands["machinepw"] = cmd_machinepw()
from samba.netcmd.spn import cmd_spn
commands["spn"] = cmd_spn()
from samba.netcmd.group import cmd_group
commands["group"] = cmd_group()
from samba.netcmd.join import cmd_join
commands["join"] = cmd_join()
from samba.netcmd.rodc import cmd_rodc
commands["rodc"] = cmd_rodc()
from samba.netcmd.drs import cmd_drs
commands["drs"] = cmd_drs()
from samba.netcmd.gpo import cmd_gpo
commands["gpo2"] = cmd_gpo()
from samba.netcmd.ldapcmp import cmd_ldapcmp
commands["ldapcmp"] = cmd_ldapcmp()
|
emreg00/biana
|
refs/heads/master
|
biana/BianaParser/XMLNode.py
|
2
|
"""
BIANA: Biologic Interactions and Network Analysis
Copyright (C) 2009 Javier Garcia-Garcia, Emre Guney, Baldo Oliva
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
class XMLNode(object):
def __init__(self, name, attrs):
self.name = name
self.attrs = attrs
self.childs = {}
self.value = []
def addChild(self, XMLNodeChild):
self.childs.setdefault(XMLNodeChild.name.lower(), []).append(XMLNodeChild)
def getValue(self):
return "".join(self.value)
def addValue(self, value):
self.value.append(value)
def getChilds(self, child_name=None):
#print [ y.name for x in self.childs.values() for y in x ]
if child_name is None:
c = []
[ c.extend(x) for x in self.childs.values() ]
return c
return self.childs[child_name.lower()]
def getChild(self, child_name):
if( len(self.childs[child_name.lower()])!=1 ):
#raise ValueError("You are trying to obtain a single child when it has %s childs" %(len(self.childs[child_name.lower()]) ) )
return None
return self.childs[child_name.lower()][0]
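# Usage sketch (hypothetical element names; not part of the original
# module). Child lookups are case-insensitive because addChild keys on
# name.lower():
#
#   root = XMLNode('entry', {})
#   root.addChild(XMLNode('Name', {}))
#   root.getChilds('name')   # -> [the 'Name' child]
#   root.getChild('name')    # -> that child, or None if not exactly one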
|
CCI-Tools/ect-core
|
refs/heads/master
|
test/util/__init__.py
|
12133432
| |
otherness-space/myProject
|
refs/heads/master
|
my_project_001/lib/python2.7/site-packages/django/conf/locale/ga/__init__.py
|
12133432
| |
arifsetiawan/edx-platform
|
refs/heads/master
|
common/djangoapps/terrain/stubs/tests/__init__.py
|
12133432
| |
danielmellado/tempest
|
refs/heads/master
|
tempest/api/database/limits/__init__.py
|
12133432
| |
kxliugang/edx-platform
|
refs/heads/master
|
common/djangoapps/student/migrations/0040_auto__del_field_usersignupsource_user_id__add_field_usersignupsource_u.py
|
114
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'UserSignupSource.user_id'
db.delete_column('student_usersignupsource', 'user_id_id')
# Adding field 'UserSignupSource.user'
db.add_column('student_usersignupsource', 'user',
self.gf('django.db.models.fields.related.ForeignKey')(default=0, to=orm['auth.User']),
keep_default=False)
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'UserSignupSource.user_id'
raise RuntimeError("Cannot reverse this migration. 'UserSignupSource.user_id' and its values cannot be restored.")
# Deleting field 'UserSignupSource.user'
db.delete_column('student_usersignupsource', 'user_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.anonymoususerid': {
'Meta': {'object_name': 'AnonymousUserId'},
'anonymous_user_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseaccessrole': {
'Meta': {'unique_together': "(('user', 'org', 'course_id', 'role'),)", 'object_name': 'CourseAccessRole'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'org': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollmentallowed': {
'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'},
'auto_enroll': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'student.loginfailures': {
'Meta': {'object_name': 'LoginFailures'},
'failure_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lockout_until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.passwordhistory': {
'Meta': {'object_name': 'PasswordHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'time_set': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'allow_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'city': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'student.usersignupsource': {
'Meta': {'object_name': 'UserSignupSource'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.userstanding': {
'Meta': {'object_name': 'UserStanding'},
'account_status': ('django.db.models.fields.CharField', [], {'max_length': '31', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'standing_last_changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'standing'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
|
leighpauls/k2cro4
|
refs/heads/master
|
third_party/python_26/Lib/sqlite3/dump.py
|
247
|
# Mimic the sqlite3 console shell's .dump command
# Author: Paul Kippes <kippesp@gmail.com>
def _iterdump(connection):
"""
Returns an iterator to the dump of the database in an SQL text format.
Used to produce an SQL dump of the database. Useful to save an in-memory
database for later restoration. This function should not be called
directly but instead called from the Connection method, iterdump().
"""
cu = connection.cursor()
yield('BEGIN TRANSACTION;')
# sqlite_master table contains the SQL CREATE statements for the database.
q = """
SELECT name, type, sql
FROM sqlite_master
WHERE sql NOT NULL AND
type == 'table'
"""
schema_res = cu.execute(q)
for table_name, type, sql in schema_res.fetchall():
if table_name == 'sqlite_sequence':
yield('DELETE FROM sqlite_sequence;')
elif table_name == 'sqlite_stat1':
yield('ANALYZE sqlite_master;')
elif table_name.startswith('sqlite_'):
continue
# NOTE: Virtual table support not implemented
#elif sql.startswith('CREATE VIRTUAL TABLE'):
# qtable = table_name.replace("'", "''")
# yield("INSERT INTO sqlite_master(type,name,tbl_name,rootpage,sql)"\
# "VALUES('table','%s','%s',0,'%s');" %
# qtable,
# qtable,
# sql.replace("''"))
else:
yield('%s;' % sql)
# Build the insert statement for each row of the current table
res = cu.execute("PRAGMA table_info('%s')" % table_name)
column_names = [str(table_info[1]) for table_info in res.fetchall()]
q = "SELECT 'INSERT INTO \"%(tbl_name)s\" VALUES("
q += ",".join(["'||quote(" + col + ")||'" for col in column_names])
q += ")' FROM '%(tbl_name)s'"
query_res = cu.execute(q % {'tbl_name': table_name})
for row in query_res:
yield("%s;" % row[0])
# Now when the type is 'index', 'trigger', or 'view'
q = """
SELECT name, type, sql
FROM sqlite_master
WHERE sql NOT NULL AND
type IN ('index', 'trigger', 'view')
"""
schema_res = cu.execute(q)
for name, type, sql in schema_res.fetchall():
yield('%s;' % sql)
yield('COMMIT;')
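# Usage sketch: this generator is normally reached through the standard
# Connection.iterdump() method, e.g.
#
#   import sqlite3
#   con = sqlite3.connect(':memory:')
#   con.execute('CREATE TABLE t (x INTEGER)')
#   con.execute('INSERT INTO t VALUES (1)')
#   for line in con.iterdump():
#       print line   # BEGIN TRANSACTION; CREATE TABLE ...; COMMIT;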
|
mrquim/repository.mrquim
|
refs/heads/master
|
repo/script.module.youtube.dl/lib/youtube_dl/extractor/beampro.py
|
36
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
clean_html,
compat_str,
float_or_none,
int_or_none,
parse_iso8601,
try_get,
urljoin,
)
class BeamProBaseIE(InfoExtractor):
_API_BASE = 'https://mixer.com/api/v1'
_RATINGS = {'family': 0, 'teen': 13, '18+': 18}
def _extract_channel_info(self, chan):
user_id = chan.get('userId') or try_get(chan, lambda x: x['user']['id'])
return {
'uploader': chan.get('token') or try_get(
chan, lambda x: x['user']['username'], compat_str),
'uploader_id': compat_str(user_id) if user_id else None,
'age_limit': self._RATINGS.get(chan.get('audience')),
}
class BeamProLiveIE(BeamProBaseIE):
IE_NAME = 'Mixer:live'
_VALID_URL = r'https?://(?:\w+\.)?(?:beam\.pro|mixer\.com)/(?P<id>[^/?#&]+)'
_TEST = {
'url': 'http://mixer.com/niterhayven',
'info_dict': {
'id': '261562',
'ext': 'mp4',
'title': 'Introducing The Witcher 3 // The Grind Starts Now!',
'description': 'md5:0b161ac080f15fe05d18a07adb44a74d',
'thumbnail': r're:https://.*\.jpg$',
'timestamp': 1483477281,
'upload_date': '20170103',
'uploader': 'niterhayven',
'uploader_id': '373396',
'age_limit': 18,
'is_live': True,
'view_count': int,
},
'skip': 'niterhayven is offline',
'params': {
'skip_download': True,
},
}
_MANIFEST_URL_TEMPLATE = '%s/channels/%%s/manifest.%%s' % BeamProBaseIE._API_BASE
@classmethod
def suitable(cls, url):
return False if BeamProVodIE.suitable(url) else super(BeamProLiveIE, cls).suitable(url)
def _real_extract(self, url):
channel_name = self._match_id(url)
chan = self._download_json(
'%s/channels/%s' % (self._API_BASE, channel_name), channel_name)
if chan.get('online') is False:
raise ExtractorError(
'{0} is offline'.format(channel_name), expected=True)
channel_id = chan['id']
def manifest_url(kind):
return self._MANIFEST_URL_TEMPLATE % (channel_id, kind)
formats = self._extract_m3u8_formats(
manifest_url('m3u8'), channel_name, ext='mp4', m3u8_id='hls',
fatal=False)
formats.extend(self._extract_smil_formats(
manifest_url('smil'), channel_name, fatal=False))
self._sort_formats(formats)
info = {
'id': compat_str(chan.get('id') or channel_name),
'title': self._live_title(chan.get('name') or channel_name),
'description': clean_html(chan.get('description')),
'thumbnail': try_get(
chan, lambda x: x['thumbnail']['url'], compat_str),
'timestamp': parse_iso8601(chan.get('updatedAt')),
'is_live': True,
'view_count': int_or_none(chan.get('viewersTotal')),
'formats': formats,
}
info.update(self._extract_channel_info(chan))
return info
class BeamProVodIE(BeamProBaseIE):
IE_NAME = 'Mixer:vod'
_VALID_URL = r'https?://(?:\w+\.)?(?:beam\.pro|mixer\.com)/[^/?#&]+\?.*?\bvod=(?P<id>\d+)'
_TEST = {
'url': 'https://mixer.com/willow8714?vod=2259830',
'md5': 'b2431e6e8347dc92ebafb565d368b76b',
'info_dict': {
'id': '2259830',
'ext': 'mp4',
'title': 'willow8714\'s Channel',
'duration': 6828.15,
'thumbnail': r're:https://.*source\.png$',
'timestamp': 1494046474,
'upload_date': '20170506',
'uploader': 'willow8714',
'uploader_id': '6085379',
'age_limit': 13,
'view_count': int,
},
'params': {
'skip_download': True,
},
}
@staticmethod
def _extract_format(vod, vod_type):
if not vod.get('baseUrl'):
return []
if vod_type == 'hls':
filename, protocol = 'manifest.m3u8', 'm3u8_native'
elif vod_type == 'raw':
filename, protocol = 'source.mp4', 'https'
else:
assert False
data = vod.get('data') if isinstance(vod.get('data'), dict) else {}
format_id = [vod_type]
if isinstance(data.get('Height'), compat_str):
format_id.append('%sp' % data['Height'])
return [{
'url': urljoin(vod['baseUrl'], filename),
'format_id': '-'.join(format_id),
'ext': 'mp4',
'protocol': protocol,
'width': int_or_none(data.get('Width')),
'height': int_or_none(data.get('Height')),
'fps': int_or_none(data.get('Fps')),
'tbr': int_or_none(data.get('Bitrate'), 1000),
}]
def _real_extract(self, url):
vod_id = self._match_id(url)
vod_info = self._download_json(
'%s/recordings/%s' % (self._API_BASE, vod_id), vod_id)
state = vod_info.get('state')
if state != 'AVAILABLE':
raise ExtractorError(
'VOD %s is not available (state: %s)' % (vod_id, state),
expected=True)
formats = []
thumbnail_url = None
for vod in vod_info['vods']:
vod_type = vod.get('format')
if vod_type in ('hls', 'raw'):
formats.extend(self._extract_format(vod, vod_type))
elif vod_type == 'thumbnail':
thumbnail_url = urljoin(vod.get('baseUrl'), 'source.png')
self._sort_formats(formats)
info = {
'id': vod_id,
'title': vod_info.get('name') or vod_id,
'duration': float_or_none(vod_info.get('duration')),
'thumbnail': thumbnail_url,
'timestamp': parse_iso8601(vod_info.get('createdAt')),
'view_count': int_or_none(vod_info.get('viewsTotal')),
'formats': formats,
}
info.update(self._extract_channel_info(vod_info.get('channel') or {}))
return info
|
snbueno/anaconda
|
refs/heads/master
|
pyanaconda/ui/gui/spokes/lib/cart.py
|
10
|
# Disk shopping cart
#
# Copyright (C) 2011, 2012 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): David Lehman <dlehman@redhat.com>
# Chris Lumens <clumens@redhat.com>
#
from gi.repository import Gtk
from pyanaconda.i18n import C_, P_
from pyanaconda.ui.gui import GUIObject
from pyanaconda.ui.gui.utils import escape_markup
from blivet.size import Size
__all__ = ["SelectedDisksDialog"]
IS_BOOT_COL = 0
DESCRIPTION_COL = 1
SIZE_COL = 2
FREE_SPACE_COL = 3
NAME_COL = 4
ID_COL = 5
class SelectedDisksDialog(GUIObject):
builderObjects = ["selected_disks_dialog", "disk_store", "disk_tree_view"]
mainWidgetName = "selected_disks_dialog"
uiFile = "spokes/lib/cart.glade"
def __init__(self, data):
GUIObject.__init__(self, data)
self._view = self.builder.get_object("disk_tree_view")
self._store = self.builder.get_object("disk_store")
self._selection = self.builder.get_object("disk_selection")
self._summary_label = self.builder.get_object("summary_label")
self._set_button = self.builder.get_object("set_as_boot_button")
self._remove_button = self.builder.get_object("remove_button")
# pylint: disable=arguments-differ
def initialize(self, disks, free, showRemove=True, setBoot=True):
self._previousID = None
for disk in disks:
self._store.append([False,
"%s (%s)" % (disk.description, disk.serial),
str(disk.size),
str(free[disk.name][0]),
disk.name,
disk.id])
self.disks = disks[:]
self._update_summary()
if not showRemove:
self.builder.get_object("remove_button").hide()
if not setBoot:
self._set_button.hide()
if not disks:
return
# Don't select a boot device if no boot device is asked for.
if self.data.bootloader.location == "none":
return
# Set up the default boot device. Use what's in the ksdata if anything,
# then fall back to the first device.
default_id = None
if self.data.bootloader.bootDrive:
for d in self.disks:
if d.name == self.data.bootloader.bootDrive:
default_id = d.id
if not default_id:
default_id = self.disks[0].id
# And then select it in the UI.
for row in self._store:
if row[ID_COL] == default_id:
self._previousID = row[ID_COL]
row[IS_BOOT_COL] = True
break
# pylint: disable=arguments-differ
def refresh(self, disks, free, showRemove=True, setBoot=True):
super(SelectedDisksDialog, self).refresh()
# clear out the store and repopulate it from the devicetree
self._store.clear()
self.initialize(disks, free, showRemove=showRemove, setBoot=setBoot)
def run(self):
rc = self.window.run()
self.window.destroy()
return rc
def _get_selection_refs(self):
selected_refs = []
if self._selection.count_selected_rows():
selected_paths = self._selection.get_selected_rows()[1]
selected_refs = [Gtk.TreeRowReference() for _p in selected_paths]
return selected_refs
def _update_summary(self):
count = 0
size = Size(0)
free = Size(0)
for row in self._store:
count += 1
size += Size(row[SIZE_COL])
free += Size(row[FREE_SPACE_COL])
# pylint: disable=unescaped-markup
text = P_("<b>%(count)d disk; %(size)s capacity; %(free)s free space</b> "
"(unpartitioned and in file systems)",
"<b>%(count)d disks; %(size)s capacity; %(free)s free space</b> "
"(unpartitioned and in file systems)",
count) % {"count" : count,
"size" : escape_markup(size),
"free" : escape_markup(free)}
self._summary_label.set_markup(text)
# signal handlers
def on_remove_clicked(self, button):
itr = self._selection.get_selected()[1]
if not itr:
return
disk = None
for d in self.disks:
if d.id == self._store[itr][ID_COL]:
disk = d
break
if not disk:
return
# If this disk was marked as the boot device, just change to the first one
# instead.
resetBootDevice = self._store[itr][IS_BOOT_COL]
# remove the selected disk(s) from the list and update the summary label
self._store.remove(itr)
self.disks.remove(disk)
if resetBootDevice and len(self._store) > 0:
self._store[0][IS_BOOT_COL] = True
self._update_summary()
# If no disks are available in the cart anymore, grey out the buttons.
self._set_button.set_sensitive(len(self._store) > 0)
self._remove_button.set_sensitive(len(self._store) > 0)
def on_close_clicked(self, button):
# Save the boot device setting, if something was selected.
for row in self._store:
if row[IS_BOOT_COL]:
for disk in self.disks:
if disk.id == row[ID_COL]:
self.data.bootloader.bootDrive = disk.name
self.data.bootloader.location = "mbr"
return
# No device was selected. The user does not want to install
# a bootloader.
self.data.bootloader.bootDrive = ""
self.data.bootloader.location = "none"
def _toggle_button_text(self, row):
if row[IS_BOOT_COL]:
self._set_button.set_label(C_("GUI|Selected Disks Dialog",
"_Do not install boot loader"))
else:
self._set_button.set_label(C_("GUI|Selected Disks Dialog",
"_Set as Boot Device"))
def on_selection_changed(self, *args):
itr = self._selection.get_selected()[1]
if not itr:
return
self._toggle_button_text(self._store[itr])
def on_set_as_boot_clicked(self, button):
itr = self._selection.get_selected()[1]
if not itr:
return
# There's only two cases:
if self._store[itr][ID_COL] == self._previousID:
# Either the user clicked on the device they'd previously selected,
# in which case we are just toggling here.
self._store[itr][IS_BOOT_COL] = not self._store[itr][IS_BOOT_COL]
else:
# Or they clicked on a different device. First we unselect the
# previously selected device.
for row in self._store:
if row[ID_COL] == self._previousID:
row[IS_BOOT_COL] = False
break
# Then we select the new row.
self._store[itr][IS_BOOT_COL] = True
self._previousID = self._store[itr][ID_COL]
self._toggle_button_text(self._store[itr])
|
ndsol/volcano
|
refs/heads/master
|
src/gn/toolchain/wrapper_utils.py
|
1
|
# Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Copyright (c) 2017-2018 the Volcano Authors. All rights reserved.
# Licensed under the GPLv3.
"""Helper functions for gcc_toolchain.gni wrappers."""
import errno
import os
import re
import subprocess
from select import select
import shlex
import sys
_BAT_PREFIX = 'cmd /c call '
_WHITELIST_RE = re.compile('whitelisted_resource_(?P<resource_id>[0-9]+)')
def CommandToRun(command):
"""Generates commands compatible with Windows.
When running on a Windows host and using a toolchain whose tools are
actually wrapper scripts (i.e. .bat files on Windows) rather than binary
executables, the |command| to run has to be prefixed with this magic.
The GN toolchain definitions take care of that for when GN/Ninja is
running the tool directly. When that command is passed in to this
script, it appears as a unitary string but needs to be split up so that
just 'cmd' is the actual command given to Python's subprocess module.
Args:
command: List containing the UNIX style |command|.
Returns:
A list containing the Windows version of the |command|.
"""
if command[0].startswith(_BAT_PREFIX):
command = command[0].split(None, 3) + command[1:]
return command
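# Worked example (illustrative): a wrapped .bat command arrives as a single
# string and is split so that subprocess sees 'cmd' as the executable:
#
#   CommandToRun(['cmd /c call tool.bat', 'input.o'])
#   # -> ['cmd', '/c', 'call', 'tool.bat', 'input.o']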
def ResolveRspLinks(inputs):
"""Return a list of files contained in a response file.
Args:
inputs: A command containing rsp files.
Returns:
A set containing the rsp file content."""
rspfiles = [a[1:] for a in inputs if a.startswith('@')]
resolved = set()
for rspfile in rspfiles:
with open(rspfile, 'r') as f:
resolved.update(shlex.split(f.read()))
return resolved
def CombineResourceWhitelists(whitelist_candidates, outfile):
"""Combines all whitelists for a resource file into a single whitelist.
Args:
whitelist_candidates: List of paths to rsp files containing all targets.
outfile: Path to save the combined whitelist.
"""
whitelists = ('%s.whitelist' % candidate for candidate in whitelist_candidates
if os.path.exists('%s.whitelist' % candidate))
resources = set()
for whitelist in whitelists:
with open(whitelist, 'r') as f:
resources.update(f.readlines())
with open(outfile, 'w') as f:
f.writelines(resources)
def ExtractResourceIdsFromPragmaWarnings(text):
"""Returns set of resource IDs that are inside unknown pragma warnings.
Args:
text: The text that will be scanned for unknown pragma warnings.
Returns:
A set containing integers representing resource IDs.
"""
used_resources = set()
lines = text.splitlines()
for ln in lines:
match = _WHITELIST_RE.search(ln)
if match:
resource_id = int(match.group('resource_id'))
used_resources.add(resource_id)
return used_resources
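# Illustrative example: a line such as
#   "warning: unknown pragma 'whitelisted_resource_1234'"
# yields the set {1234}.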
class runcmd(object):
def __init__(self, env):
self.have_out = False
self.have_err = False
self.env = env
self.p = None
def handle_output(self, is_stdout, out, data):
out.write(data.decode('utf8', 'ignore')
.encode(sys.stdout.encoding, 'replace').decode(sys.stdout.encoding))
out.flush()
def run(self, args):
path_saved = os.environ["PATH"]
if "PATH" in self.env:
os.environ["PATH"] = self.env["PATH"]
try:
import pty
masters, slaves = zip(pty.openpty(), pty.openpty())
self.p = subprocess.Popen(args, stdout=slaves[0], stderr=slaves[1],
env = self.env)
for fd in slaves: os.close(fd)
readable = { masters[0]: sys.stdout, masters[1]: sys.stderr }
while readable:
for fd in select(readable, [], [])[0]:
try: data = os.read(fd, 1024)
except OSError as e:
if e.errno != errno.EIO: raise
del readable[fd]
continue
if not data:
del readable[fd]
continue
self.handle_output(fd == masters[0], readable[fd], data)
if fd != masters[0]:
self.have_err = True
else:
self.have_out = True
self.p.wait()
except ImportError:
if sys.platform != "win32":
raise
self.p = subprocess.Popen(args, shell=True)
(o, e) = self.p.communicate()
self.have_out = False
self.have_err = False
if o:
self.have_out = True
self.handle_output(True, sys.stdout, o)
if e:
self.have_err = True
self.handle_output(False, sys.stdout, e)
os.environ["PATH"] = path_saved
class runcmd_return_stderr(runcmd):
def __init__(self, env, max_out_len):
if sys.platform == "darwin":
# see https://stackoverflow.com/questions/46402772
env["JAVA_OPTS"] = "-XX:+IgnoreUnrecognizedVMOptions --add-modules java.se.ee"
super(runcmd_return_stderr, self).__init__(env)
self.max_out_len = max_out_len
self.result = b""
self.also_stderr = False
def handle_output(self, is_stdout, out, data):
if is_stdout:
return
self.result += data
if len(self.result) > self.max_out_len:
self.p.kill()
def run(self, args):
super(runcmd_return_stderr, self).run(args)
if len(self.result) > self.max_out_len:
print("run(%s) got more than %d bytes" % (args, self.max_out_len))
sys.exit(1)
return (self.result
.decode('utf8', 'ignore')
.encode(sys.stdout.encoding, 'replace')
.decode(sys.stdout.encoding))
def CaptureCommandStderr(command):
"""Returns the stderr of a command.
"""
r = runcmd_return_stderr(os.environ.copy(), 4096)
stderr = r.run(command)
return r.p.returncode, stderr
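# Usage sketch (hypothetical command):
#   returncode, stderr_text = CaptureCommandStderr(['gcc', '--version'])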
|
scrappleapp/scrapple
|
refs/heads/master
|
scrapple/selectors/xpath.py
|
2
|
"""
scrapple.selectors.xpath
~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import print_function
from scrapple.selectors.selector import Selector
from scrapple.utils.text import make_ascii
class XpathSelector(Selector):
"""
The ``XpathSelector`` object defines XPath expressions.
"""
__selector_type__ = 'XPath'
def __init__(self, url):
"""
The ``Selector`` class acts as the super class for this class.
"""
super(XpathSelector, self).__init__(url)
def get_tree_tag(self, selector='', get_one=False, *args, **kwargs):
tags = self.tree.xpath(selector)
if get_one:
return tags[0]
return tags
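# Usage sketch (URL and XPath expression are illustrative only):
#
#   selector = XpathSelector('http://example.com')
#   title = selector.get_tree_tag('//title/text()', get_one=True)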
|
mozilla/stoneridge
|
refs/heads/master
|
python/src/Lib/ctypes/test/test_bitfields.py
|
32
|
from ctypes import *
import unittest
import os
import ctypes
import _ctypes_test
class BITS(Structure):
_fields_ = [("A", c_int, 1),
("B", c_int, 2),
("C", c_int, 3),
("D", c_int, 4),
("E", c_int, 5),
("F", c_int, 6),
("G", c_int, 7),
("H", c_int, 8),
("I", c_int, 9),
("M", c_short, 1),
("N", c_short, 2),
("O", c_short, 3),
("P", c_short, 4),
("Q", c_short, 5),
("R", c_short, 6),
("S", c_short, 7)]
func = CDLL(_ctypes_test.__file__).unpack_bitfields
func.argtypes = POINTER(BITS), c_char
##for n in "ABCDEFGHIMNOPQRS":
## print n, hex(getattr(BITS, n).size), getattr(BITS, n).offset
class C_Test(unittest.TestCase):
def test_ints(self):
for i in range(512):
for name in "ABCDEFGHI":
b = BITS()
setattr(b, name, i)
self.assertEqual((name, i, getattr(b, name)), (name, i, func(byref(b), name)))
def test_shorts(self):
for i in range(256):
for name in "MNOPQRS":
b = BITS()
setattr(b, name, i)
self.assertEqual((name, i, getattr(b, name)), (name, i, func(byref(b), name)))
signed_int_types = (c_byte, c_short, c_int, c_long, c_longlong)
unsigned_int_types = (c_ubyte, c_ushort, c_uint, c_ulong, c_ulonglong)
int_types = unsigned_int_types + signed_int_types
class BitFieldTest(unittest.TestCase):
def test_longlong(self):
class X(Structure):
_fields_ = [("a", c_longlong, 1),
("b", c_longlong, 62),
("c", c_longlong, 1)]
self.assertEqual(sizeof(X), sizeof(c_longlong))
x = X()
x.a, x.b, x.c = -1, 7, -1
self.assertEqual((x.a, x.b, x.c), (-1, 7, -1))
def test_ulonglong(self):
class X(Structure):
_fields_ = [("a", c_ulonglong, 1),
("b", c_ulonglong, 62),
("c", c_ulonglong, 1)]
self.assertEqual(sizeof(X), sizeof(c_longlong))
x = X()
self.assertEqual((x.a, x.b, x.c), (0, 0, 0))
x.a, x.b, x.c = 7, 7, 7
self.assertEqual((x.a, x.b, x.c), (1, 7, 1))
def test_signed(self):
for c_typ in signed_int_types:
class X(Structure):
_fields_ = [("dummy", c_typ),
("a", c_typ, 3),
("b", c_typ, 3),
("c", c_typ, 1)]
self.assertEqual(sizeof(X), sizeof(c_typ)*2)
x = X()
self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, 0, 0, 0))
x.a = -1
self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, -1, 0, 0))
x.a, x.b = 0, -1
self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, 0, -1, 0))
def test_unsigned(self):
for c_typ in unsigned_int_types:
class X(Structure):
_fields_ = [("a", c_typ, 3),
("b", c_typ, 3),
("c", c_typ, 1)]
self.assertEqual(sizeof(X), sizeof(c_typ))
x = X()
self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, 0, 0, 0))
x.a = -1
self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, 7, 0, 0))
x.a, x.b = 0, -1
self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, 0, 7, 0))
def fail_fields(self, *fields):
return self.get_except(type(Structure), "X", (),
{"_fields_": fields})
def test_nonint_types(self):
# bit fields are not allowed on non-integer types.
result = self.fail_fields(("a", c_char_p, 1))
self.assertEqual(result, (TypeError, 'bit fields not allowed for type c_char_p'))
result = self.fail_fields(("a", c_void_p, 1))
self.assertEqual(result, (TypeError, 'bit fields not allowed for type c_void_p'))
if c_int != c_long:
result = self.fail_fields(("a", POINTER(c_int), 1))
self.assertEqual(result, (TypeError, 'bit fields not allowed for type LP_c_int'))
result = self.fail_fields(("a", c_char, 1))
self.assertEqual(result, (TypeError, 'bit fields not allowed for type c_char'))
try:
c_wchar
except NameError:
pass
else:
result = self.fail_fields(("a", c_wchar, 1))
self.assertEqual(result, (TypeError, 'bit fields not allowed for type c_wchar'))
class Dummy(Structure):
_fields_ = []
result = self.fail_fields(("a", Dummy, 1))
self.assertEqual(result, (TypeError, 'bit fields not allowed for type Dummy'))
def test_single_bitfield_size(self):
for c_typ in int_types:
result = self.fail_fields(("a", c_typ, -1))
self.assertEqual(result, (ValueError, 'number of bits invalid for bit field'))
result = self.fail_fields(("a", c_typ, 0))
self.assertEqual(result, (ValueError, 'number of bits invalid for bit field'))
class X(Structure):
_fields_ = [("a", c_typ, 1)]
self.assertEqual(sizeof(X), sizeof(c_typ))
class X(Structure):
_fields_ = [("a", c_typ, sizeof(c_typ)*8)]
self.assertEqual(sizeof(X), sizeof(c_typ))
result = self.fail_fields(("a", c_typ, sizeof(c_typ)*8 + 1))
self.assertEqual(result, (ValueError, 'number of bits invalid for bit field'))
def test_multi_bitfields_size(self):
class X(Structure):
_fields_ = [("a", c_short, 1),
("b", c_short, 14),
("c", c_short, 1)]
self.assertEqual(sizeof(X), sizeof(c_short))
class X(Structure):
_fields_ = [("a", c_short, 1),
("a1", c_short),
("b", c_short, 14),
("c", c_short, 1)]
self.assertEqual(sizeof(X), sizeof(c_short)*3)
self.assertEqual(X.a.offset, 0)
self.assertEqual(X.a1.offset, sizeof(c_short))
self.assertEqual(X.b.offset, sizeof(c_short)*2)
self.assertEqual(X.c.offset, sizeof(c_short)*2)
class X(Structure):
_fields_ = [("a", c_short, 3),
("b", c_short, 14),
("c", c_short, 14)]
self.assertEqual(sizeof(X), sizeof(c_short)*3)
self.assertEqual(X.a.offset, sizeof(c_short)*0)
self.assertEqual(X.b.offset, sizeof(c_short)*1)
self.assertEqual(X.c.offset, sizeof(c_short)*2)
def get_except(self, func, *args, **kw):
try:
func(*args, **kw)
except Exception, detail:
return detail.__class__, str(detail)
def test_mixed_1(self):
class X(Structure):
_fields_ = [("a", c_byte, 4),
("b", c_int, 4)]
if os.name in ("nt", "ce"):
self.assertEqual(sizeof(X), sizeof(c_int)*2)
else:
self.assertEqual(sizeof(X), sizeof(c_int))
def test_mixed_2(self):
class X(Structure):
_fields_ = [("a", c_byte, 4),
("b", c_int, 32)]
self.assertEqual(sizeof(X), sizeof(c_int)*2)
def test_mixed_3(self):
class X(Structure):
_fields_ = [("a", c_byte, 4),
("b", c_ubyte, 4)]
self.assertEqual(sizeof(X), sizeof(c_byte))
def test_mixed_4(self):
class X(Structure):
_fields_ = [("a", c_short, 4),
("b", c_short, 4),
("c", c_int, 24),
("d", c_short, 4),
("e", c_short, 4),
("f", c_int, 24)]
# MSVC does NOT combine c_short and c_int into one field, GCC
# does (unless GCC is run with '-mms-bitfields' which
# produces code compatible with MSVC).
if os.name in ("nt", "ce"):
self.assertEqual(sizeof(X), sizeof(c_int) * 4)
else:
self.assertEqual(sizeof(X), sizeof(c_int) * 2)
def test_anon_bitfields(self):
# anonymous bit-fields gave a strange error message
class X(Structure):
_fields_ = [("a", c_byte, 4),
("b", c_ubyte, 4)]
class Y(Structure):
_anonymous_ = ["_"]
_fields_ = [("_", X)]
if __name__ == "__main__":
unittest.main()
|
CospanDesign/nysa-gui
|
refs/heads/master
|
NysaGui/host/adc_visualizer/__init__.py
|
12133432
| |
discoverygarden/fcrepo
|
refs/heads/master
|
src/fcrepo/tests/__init__.py
|
12133432
| |
kmoocdev2/edx-platform
|
refs/heads/real_2019
|
common/djangoapps/static_replace/management/commands/__init__.py
|
12133432
| |
fnouama/intellij-community
|
refs/heads/master
|
python/testData/refactoring/move/dunderAll/before/src/b.py
|
12133432
| |
Ahmad31/Web_Flask_Cassandra
|
refs/heads/master
|
flask/lib/python2.7/site-packages/cqlengine/tests/connections/__init__.py
|
12133432
| |
sagark123/coala
|
refs/heads/master
|
tests/results/AbsolutePositionTest.py
|
35
|
import unittest
from coalib.results.AbsolutePosition import AbsolutePosition, calc_line_col
from coalib.misc.Constants import COMPLEX_TEST_STRING
class AbsolutePositionTest(unittest.TestCase):
def test_calc_line_col_newlines(self):
# no newlines
text = ("find position of 'z'",)
z_pos = text[0].find('z')
self.assertEqual(
calc_line_col(text, z_pos), (1, z_pos + 1))
# newline
text = ('find position of\n', "'z'",)
string_text = ''.join(text)
z_pos = string_text.find('z')
self.assertEqual(calc_line_col(text, z_pos), (2, 2))
def test_calc_line_col_unicode(self):
uni_pos = COMPLEX_TEST_STRING.find('↑')
self.assertEqual(
calc_line_col((COMPLEX_TEST_STRING,), uni_pos),
(1, uni_pos + 1))
def test_calc_line_col_rawstrings(self):
for raw in [(r'a\b',), (r'a\n',), ('a\\n',)]:
pos = raw[0].find(raw[0][-1])
self.assertEqual(calc_line_col(raw, pos), (1, 3))
def test_calc_line_col_extremes(self):
# End of Line
text = ('First Line\n', 'End of second line z')
string_text = ''.join(text)
z_pos = string_text.find('z')
self.assertEqual(calc_line_col(text, z_pos),
(2, len(text[1])))
# Out of text
with self.assertRaises(ValueError):
text = ('Some line')
calc_line_col(text, 50)
# start of line
text = ('First Line\n', 'zEnd of second line')
string_text = ''.join(text)
z_pos = string_text.find('z')
self.assertEqual(calc_line_col(text, z_pos), (2, 1))
def test_property(self):
uut = AbsolutePosition(('1', '2'), 1)
self.assertEqual(uut.position, 1)
self.assertEqual(uut.line, 2)
self.assertEqual(uut.column, 1)
uut = AbsolutePosition()
self.assertEqual(uut.position, None)
self.assertEqual(uut.line, None)
self.assertEqual(uut.column, None)
uut = AbsolutePosition(('a\n', 'b\n'), 0)
self.assertEqual(uut.position, 0)
self.assertEqual(uut.line, 1)
self.assertEqual(uut.column, 1)
def test_instantiation(self):
with self.assertRaises(ValueError):
uut = AbsolutePosition((), 0)
uut = AbsolutePosition(position=5)
self.assertEqual(uut.position, 5)
self.assertEqual(uut.line, None)
self.assertEqual(uut.column, None)
|
gilramir/instmake
|
refs/heads/master
|
instmakeplugins/report_ovtime.py
|
1
|
# Copyright (c) 2010 by Cisco Systems, Inc.
"""
Report multiple makes in the same directory.
"""
from instmakelib import instmake_log as LOG
description = "Over-all Build Time. Accepts multiple instmake logs."
def usage():
print "ovtime:", description
def print_ovtime(log_file_name, print_filename):
# Open the log file
log = LOG.LogFile(log_file_name)
top_rec = None
# Read the log records
while 1:
try:
rec = log.read_record()
except EOFError:
log.close()
break
if rec.ppid is None:
if top_rec:
print "Found another record with no PPID."
top_rec = rec
if not top_rec:
print "No top-most record found."
return
print
if print_filename:
print "LOGFILE: ", log_file_name
print "CWD: ", top_rec.cwd
print "CMDLINE: ", top_rec.cmdline
print "RETVAL: ", top_rec.retval
print
print "real ", LOG.hms(top_rec.diff_times[rec.TimeIndex("REAL")])
print "user ", LOG.hms(top_rec.diff_times[rec.TimeIndex("USER")])
print "sys ", LOG.hms(top_rec.diff_times[rec.TimeIndex("SYS")])
def report(log_file_names, args):
print_filename = 0
if len(log_file_names) > 1:
print_filename = 1
for log_file_name in log_file_names:
print_ovtime(log_file_name, print_filename)
print
print
|
w12n/mascotas
|
refs/heads/master
|
mascotas/__init__.py
|
12133432
| |
JioCloud/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/project/data_processing/data_plugins/urls.py
|
4
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import patterns # noqa
from django.conf.urls import url # noqa
from openstack_dashboard.dashboards.project.\
data_processing.data_plugins import views
urlpatterns = patterns('',
url(r'^$', views.PluginsView.as_view(), name='index'),
url(r'^(?P<plugin_id>[^/]+)$',
views.PluginDetailsView.as_view(), name='details'),
)
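# Illustrative matches (not part of the original file): '' resolves to
# PluginsView ('index'); 'vanilla' resolves to PluginDetailsView with
# plugin_id='vanilla' ('details').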
|
stuartsale/pyBHC
|
refs/heads/master
|
pyBHC/dists.py
|
1
|
from __future__ import print_function, division
import abc
import numpy as np
from numpy.linalg import slogdet
import math
from scipy import stats
from scipy.special import gammaln, multigammaln
LOG2PI = math.log(2*math.pi)
LOG2 = math.log(2)
LOGPI = math.log(math.pi)
class CollapsibleDistribution(object):
""" Abstract base class for a family of conjugate distributions.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def log_marginal_likelihood(self):
pass
class FrozenDistribution(object):
""" Abstract base class for a probability distribution whose
parameters are fixed.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __init__(self):
pass
@abc.abstractmethod
def __call__(self):
pass
class NormalInverseWishart(CollapsibleDistribution):
"""
Multivariate Normal likelihood with multivariate Normal prior on
mean and Inverse-Wishart prior on the covariance matrix.
All math taken from Kevin Murphy's 2007 technical report,
'Conjugate Bayesian analysis of the Gaussian distribution'.
"""
def __init__(self, **prior_hyperparameters):
self.nu_0 = prior_hyperparameters['nu_0']
self.mu_0 = prior_hyperparameters['mu_0']
self.kappa_0 = prior_hyperparameters['kappa_0']
self.lambda_0 = prior_hyperparameters['lambda_0']
self.d = float(len(self.mu_0))
self.log_z = self.calc_log_z(self.mu_0, self.lambda_0, self.kappa_0,
self.nu_0)
@staticmethod
def update_parameters(X, _mu, _lambda, _kappa, _nu, _d):
n = X.shape[0]
xbar = np.mean(X, axis=0)
kappa_n = _kappa + n
nu_n = _nu + n
mu_n = (_kappa*_mu + n*xbar)/kappa_n
S = np.zeros(_lambda.shape) if n == 1 else (n-1)*np.cov(X.T)
dt = (xbar-_mu)[np.newaxis]
back = np.dot(dt.T, dt)
lambda_n = _lambda + S + (_kappa*n/kappa_n)*back
assert(mu_n.shape[0] == _mu.shape[0])
assert(lambda_n.shape[0] == _lambda.shape[0])
assert(lambda_n.shape[1] == _lambda.shape[1])
return mu_n, lambda_n, kappa_n, nu_n
@staticmethod
def calc_log_z(_mu, _lambda, _kappa, _nu):
d = len(_mu)
sign, detr = slogdet(_lambda)
log_z = (LOG2*(_nu*d/2.0)
+ (d/2.0)*math.log(2*math.pi/_kappa)
+ multigammaln(_nu/2, d) - (_nu/2.0)*detr)
return log_z
def log_marginal_likelihood(self, X):
n = X.shape[0]
params_n = self.update_parameters(X, self.mu_0, self.lambda_0,
self.kappa_0, self.nu_0, self.d)
log_z_n = self.calc_log_z(*params_n)
return log_z_n - self.log_z - LOG2PI*(n*self.d/2)
def log_posterior_predictive(self, X_new, X_old):
""" log_posterior_predictive(X_new, X_old)
Find the posterior predictive probabilitiy p(X_new|X_old)
where X_old is some data we already have and X_new is the
point at which we want the posterior predictive prob.
The posterior predictive distribution is a (multivariate)
t-distribution.
The formula required is given by
en.wikipedia.org/wiki/Conjugate_prior
Parameters
----------
X_old : ndarray
The existing data on which the posterior predicitve
is to be conditioned.
X_new : ndarray
The point for which we want the posterior predicitve.
"""
params_old = self.update_parameters(X_old, self.mu_0,
self.lambda_0,
self.kappa_0, self.nu_0,
self.d)
t_sigma = ((params_old[2]+1) / (params_old[2]*(params_old[3]-self.d+1))
* params_old[1])
t_sigma_inv = np.linalg.inv(t_sigma)
t_dof = params_old[3]-self.d+1
t_z = X_new - params_old[0]
t_logdiff = math.log(1+np.sum(t_z*np.dot(t_sigma_inv, t_z))
/ t_dof)
sgn, det = np.linalg.slogdet(t_sigma)
prob = (gammaln((t_dof+self.d)/2)
- gammaln(t_dof/2)
- self.d/2*math.log(t_dof)
- self.d/2*LOGPI
- det/2
- (t_dof+self.d)/2*t_logdiff)
return prob
def conditional_sample(self, X, size=1):
""" conditional_sample(X)
Sample from the posterior predictive distribution
conditioned on some data X.
The posterior predicitve distribution follows a
multivariate t distribution, as per
en.wikipedia.org/wiki/Conjugate_prior .
The multivariate is sampled by performing
x = mu + Z sqrt(nu/u) ,
where
Z ~ N(0, sigma) ,
u ~ chi2(nu) ,
this implies
x ~ t_nu(mu, sigma)
Parameters
----------
X : ndarray
The existing data on which the posterior predicitve
is to be conditioned.
size : int, optional
The number of samples to be drawn.
"""
        output = np.zeros((size, int(self.d)))  # int(): self.d is a float
params_n = self.update_parameters(X, self.mu_0, self.lambda_0,
self.kappa_0, self.nu_0,
self.d)
t_dof = params_n[3] - self.d + 1
t_cov = (params_n[2]+1) / (params_n[2]*t_dof) * params_n[1]
mvn_rv = stats.multivariate_normal(cov=t_cov)
chi2_rv = stats.chi2(t_dof)
for it in range(size):
# Sample u from chi2 dist
u = chi2_rv.rvs()
# Sample from multivariate Normal
z = mvn_rv.rvs()
output[it, :] = params_n[0] + z*math.sqrt(t_dof/u)
return output
class NormalFixedCovar(CollapsibleDistribution):
"""
Multivariate Normal likelihood with multivariate Normal prior on
mean and a fixed covariance matrix.
All math taken from Kevin Murphy's 2007 technical report,
'Conjugate Bayesian analysis of the Gaussian distribution'.
"""
def __init__(self, **prior_hyperparameters):
self.mu_0 = prior_hyperparameters['mu_0']
self.sigma_0 = prior_hyperparameters['sigma_0']
self.S = prior_hyperparameters['S']
self.d = float(len(self.mu_0))
sgn, self.sigma_0_det = np.linalg.slogdet(self.sigma_0)
sgn, self.S_det = np.linalg.slogdet(self.S)
self.sigma_0_inv = np.linalg.inv(self.sigma_0)
self.S_inv = np.linalg.inv(self.S)
self.log_z0 = self.calc_log_z(self.mu_0, self.sigma_0, self.S)
@staticmethod
def update_parameters(X, _mu, _sigma, S, _d):
n = X.shape[0]
xbar = np.mean(X, axis=0)
_sigma_inv = np.linalg.inv(_sigma)
S_inv = np.linalg.inv(S)
# update variance on mean
sigma_n_inv = _sigma_inv + n*S_inv
sigma_n = np.linalg.inv(sigma_n_inv)
# update mean
mu_n = (np.dot(_sigma_inv, _mu)
+ n*np.dot(S_inv, xbar))
mu_n = np.dot(sigma_n, mu_n)
assert(mu_n.shape[0] == _mu.shape[0])
assert(sigma_n.shape[0] == _sigma.shape[0])
assert(sigma_n.shape[1] == _sigma.shape[1])
return mu_n, sigma_n, S
@staticmethod
def calc_log_z(_mu, _sigma, S):
d = len(_mu)
sign, detr = slogdet(_sigma)
_sigma_inv = np.linalg.inv(_sigma)
        # the Gaussian quadratic term carries a factor of 1/2
        log_z = detr/2 + np.sum(_mu*np.dot(_sigma_inv, _mu))/2
return log_z
def log_marginal_likelihood(self, X):
n = X.shape[0]
params_n = self.update_parameters(X, self.mu_0, self.sigma_0,
self.S, self.d)
log_z_n = self.calc_log_z(*params_n)
Q = 0.
for i in range(n):
Q += np.sum(X[i, :]*np.dot(self.S_inv, X[i, :]))
        # Q is a sum of Gaussian quadratic forms and likewise carries 1/2
        lml = (log_z_n - self.log_z0 - LOG2PI*(n*self.d/2)
               - Q/2 - self.S_det*n/2)
return lml
def log_posterior_predictive(self, X_new, X_old):
""" log_posterior_predictive(X_new, X_old)
Find the posterior predictive probabilitiy p(X_new|X_old)
where X_old is some data we already have and X_new is the
point at which we want the posterior predictive prob.
The posterior predictive distribution is a (multivariate)
t-distribution.
The formula required is given by
en.wikipedia.org/wiki/Conjugate_prior
Parameters
----------
X_old : ndarray
The existing data on which the posterior predicitve
is to be conditioned.
X_new : ndarray
The point for which we want the posterior predicitve.
"""
params_old = self.update_parameters(X_old, self.mu_0,
self.sigma_0, self.S,
self.d)
z_sigma = params_old[1]+self.S
z_sigma_inv = np.linalg.inv(z_sigma)
diff = X_new-params_old[0]
z = np.sum(diff*np.dot(z_sigma_inv, diff))
sgn, det = np.linalg.slogdet(z_sigma)
prob = (- self.d/2*LOG2PI - det/2 - z/2)
return prob
def conditional_sample(self, X, size=1):
""" conditional_sample(X)
Sample from the posterior predictive distribution
conditioned on some data X.
For the Normal distribution the samples
are found by sampling froma (multivariate) Normal.
Parameters
----------
X : ndarray
The existing data on which the posterior predicitve
is to be conditioned.
size : int, optional
The number of samples to be drawn.
"""
        output = np.zeros((size, int(self.d)))  # int(): self.d is a float
params_n = self.update_parameters(X, self.mu_0, self.sigma_0, self.S,
self.d)
for it in range(size):
# get covariance
cov = params_n[1]+self.S
# Sample from multivariate Normal
output[it, :] = stats.multivariate_normal.rvs(
mean=params_n[0], cov=cov)
return output
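# ---------------------------------------------------------------------
# Minimal usage sketch (not part of the original module); the
# hyperparameter values below are illustrative assumptions only.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = rng.randn(20, 2)                       # toy 2-D data
    niw = NormalInverseWishart(nu_0=3.0,       # must exceed d - 1
                               mu_0=np.zeros(2),
                               kappa_0=1.0,
                               lambda_0=np.eye(2))
    print(niw.log_marginal_likelihood(X))      # collapsed log evidence
    print(niw.conditional_sample(X, size=3))   # posterior-predictive draws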
|
inverthermit/pacman_ai
|
refs/heads/master
|
search/pacman.py
|
42
|
# pacman.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
Pacman.py holds the logic for the classic pacman game along with the main
code to run a game. This file is divided into three sections:
(i) Your interface to the pacman world:
Pacman is a complex environment. You probably don't want to
                  read through all of the code we wrote to make the game run
correctly. This section contains the parts of the code
that you will need to understand in order to complete the
project. There is also some code in game.py that you should
understand.
(ii) The hidden secrets of pacman:
This section contains all of the logic code that the pacman
environment uses to decide who can move where, who dies when
things collide, etc. You shouldn't need to read this section
of code, but you can if you want.
(iii) Framework to start a game:
The final section contains the code for reading the command
you use to set up the game, then starting up a new game, along with
linking in all the external parts (agent functions, graphics).
Check this section out to see all the options available to you.
To play your first game, type 'python pacman.py' from the command line.
The keys are 'a', 's', 'd', and 'w' to move (or arrow keys). Have fun!
"""
from game import GameStateData
from game import Game
from game import Directions
from game import Actions
from util import nearestPoint
from util import manhattanDistance
import util, layout
import sys, types, time, random, os
###################################################
# YOUR INTERFACE TO THE PACMAN WORLD: A GameState #
###################################################
class GameState:
"""
A GameState specifies the full game state, including the food, capsules,
agent configurations and score changes.
GameStates are used by the Game object to capture the actual state of the game and
can be used by agents to reason about the game.
Much of the information in a GameState is stored in a GameStateData object. We
strongly suggest that you access that data via the accessor methods below rather
than referring to the GameStateData object directly.
Note that in classic Pacman, Pacman is always agent 0.
"""
####################################################
# Accessor methods: use these to access state data #
####################################################
# static variable keeps track of which states have had getLegalActions called
explored = set()
def getAndResetExplored():
tmp = GameState.explored.copy()
GameState.explored = set()
return tmp
getAndResetExplored = staticmethod(getAndResetExplored)
def getLegalActions( self, agentIndex=0 ):
"""
Returns the legal actions for the agent specified.
"""
# GameState.explored.add(self)
if self.isWin() or self.isLose(): return []
if agentIndex == 0: # Pacman is moving
return PacmanRules.getLegalActions( self )
else:
return GhostRules.getLegalActions( self, agentIndex )
def generateSuccessor( self, agentIndex, action):
"""
Returns the successor state after the specified agent takes the action.
"""
# Check that successors exist
if self.isWin() or self.isLose(): raise Exception('Can\'t generate a successor of a terminal state.')
# Copy current state
state = GameState(self)
# Let agent's logic deal with its action's effects on the board
if agentIndex == 0: # Pacman is moving
state.data._eaten = [False for i in range(state.getNumAgents())]
PacmanRules.applyAction( state, action )
else: # A ghost is moving
GhostRules.applyAction( state, action, agentIndex )
# Time passes
if agentIndex == 0:
state.data.scoreChange += -TIME_PENALTY # Penalty for waiting around
else:
GhostRules.decrementTimer( state.data.agentStates[agentIndex] )
# Resolve multi-agent effects
GhostRules.checkDeath( state, agentIndex )
# Book keeping
state.data._agentMoved = agentIndex
state.data.score += state.data.scoreChange
GameState.explored.add(self)
GameState.explored.add(state)
return state
def getLegalPacmanActions( self ):
return self.getLegalActions( 0 )
def generatePacmanSuccessor( self, action ):
"""
Generates the successor state after the specified pacman move
"""
return self.generateSuccessor( 0, action )
def getPacmanState( self ):
"""
Returns an AgentState object for pacman (in game.py)
state.pos gives the current position
state.direction gives the travel vector
"""
return self.data.agentStates[0].copy()
def getPacmanPosition( self ):
return self.data.agentStates[0].getPosition()
def getGhostStates( self ):
return self.data.agentStates[1:]
def getGhostState( self, agentIndex ):
if agentIndex == 0 or agentIndex >= self.getNumAgents():
raise Exception("Invalid index passed to getGhostState")
return self.data.agentStates[agentIndex]
def getGhostPosition( self, agentIndex ):
if agentIndex == 0:
raise Exception("Pacman's index passed to getGhostPosition")
return self.data.agentStates[agentIndex].getPosition()
def getGhostPositions(self):
return [s.getPosition() for s in self.getGhostStates()]
def getNumAgents( self ):
return len( self.data.agentStates )
def getScore( self ):
return float(self.data.score)
def getCapsules(self):
"""
Returns a list of positions (x,y) of the remaining capsules.
"""
return self.data.capsules
def getNumFood( self ):
return self.data.food.count()
def getFood(self):
"""
Returns a Grid of boolean food indicator variables.
Grids can be accessed via list notation, so to check
if there is food at (x,y), just call
currentFood = state.getFood()
if currentFood[x][y] == True: ...
"""
return self.data.food
def getWalls(self):
"""
Returns a Grid of boolean wall indicator variables.
Grids can be accessed via list notation, so to check
if there is a wall at (x,y), just call
walls = state.getWalls()
if walls[x][y] == True: ...
"""
return self.data.layout.walls
def hasFood(self, x, y):
return self.data.food[x][y]
def hasWall(self, x, y):
return self.data.layout.walls[x][y]
def isLose( self ):
return self.data._lose
def isWin( self ):
return self.data._win
#############################################
# Helper methods: #
# You shouldn't need to call these directly #
#############################################
def __init__( self, prevState = None ):
"""
Generates a new state by copying information from its predecessor.
"""
        if prevState != None: # Successor state: copy info from predecessor
self.data = GameStateData(prevState.data)
else:
self.data = GameStateData()
def deepCopy( self ):
state = GameState( self )
state.data = self.data.deepCopy()
return state
def __eq__( self, other ):
"""
Allows two states to be compared.
"""
return hasattr(other, 'data') and self.data == other.data
def __hash__( self ):
"""
Allows states to be keys of dictionaries.
"""
return hash( self.data )
def __str__( self ):
return str(self.data)
def initialize( self, layout, numGhostAgents=1000 ):
"""
Creates an initial game state from a layout array (see layout.py).
"""
self.data.initialize(layout, numGhostAgents)
############################################################################
# THE HIDDEN SECRETS OF PACMAN #
# #
# You shouldn't need to look through the code in this section of the file. #
############################################################################
SCARED_TIME = 40 # Moves ghosts are scared
COLLISION_TOLERANCE = 0.7 # How close ghosts must be to Pacman to kill
TIME_PENALTY = 1 # Number of points lost each round
class ClassicGameRules:
"""
These game rules manage the control flow of a game, deciding when
and how the game starts and ends.
"""
def __init__(self, timeout=30):
self.timeout = timeout
def newGame( self, layout, pacmanAgent, ghostAgents, display, quiet = False, catchExceptions=False):
agents = [pacmanAgent] + ghostAgents[:layout.getNumGhosts()]
initState = GameState()
initState.initialize( layout, len(ghostAgents) )
game = Game(agents, display, self, catchExceptions=catchExceptions)
game.state = initState
self.initialState = initState.deepCopy()
self.quiet = quiet
return game
def process(self, state, game):
"""
Checks to see whether it is time to end the game.
"""
if state.isWin(): self.win(state, game)
if state.isLose(): self.lose(state, game)
def win( self, state, game ):
if not self.quiet: print "Pacman emerges victorious! Score: %d" % state.data.score
game.gameOver = True
def lose( self, state, game ):
if not self.quiet: print "Pacman died! Score: %d" % state.data.score
game.gameOver = True
def getProgress(self, game):
return float(game.state.getNumFood()) / self.initialState.getNumFood()
def agentCrash(self, game, agentIndex):
if agentIndex == 0:
print "Pacman crashed"
else:
print "A ghost crashed"
def getMaxTotalTime(self, agentIndex):
return self.timeout
def getMaxStartupTime(self, agentIndex):
return self.timeout
def getMoveWarningTime(self, agentIndex):
return self.timeout
def getMoveTimeout(self, agentIndex):
return self.timeout
def getMaxTimeWarnings(self, agentIndex):
return 0
class PacmanRules:
"""
These functions govern how pacman interacts with his environment under
the classic game rules.
"""
PACMAN_SPEED=1
def getLegalActions( state ):
"""
Returns a list of possible actions.
"""
return Actions.getPossibleActions( state.getPacmanState().configuration, state.data.layout.walls )
getLegalActions = staticmethod( getLegalActions )
def applyAction( state, action ):
"""
Edits the state to reflect the results of the action.
"""
legal = PacmanRules.getLegalActions( state )
if action not in legal:
raise Exception("Illegal action " + str(action))
pacmanState = state.data.agentStates[0]
# Update Configuration
vector = Actions.directionToVector( action, PacmanRules.PACMAN_SPEED )
pacmanState.configuration = pacmanState.configuration.generateSuccessor( vector )
# Eat
next = pacmanState.configuration.getPosition()
nearest = nearestPoint( next )
if manhattanDistance( nearest, next ) <= 0.5 :
# Remove food
PacmanRules.consume( nearest, state )
applyAction = staticmethod( applyAction )
def consume( position, state ):
x,y = position
# Eat food
if state.data.food[x][y]:
state.data.scoreChange += 10
state.data.food = state.data.food.copy()
state.data.food[x][y] = False
state.data._foodEaten = position
# TODO: cache numFood?
numFood = state.getNumFood()
if numFood == 0 and not state.data._lose:
state.data.scoreChange += 500
state.data._win = True
# Eat capsule
if( position in state.getCapsules() ):
state.data.capsules.remove( position )
state.data._capsuleEaten = position
# Reset all ghosts' scared timers
for index in range( 1, len( state.data.agentStates ) ):
state.data.agentStates[index].scaredTimer = SCARED_TIME
consume = staticmethod( consume )
class GhostRules:
"""
These functions dictate how ghosts interact with their environment.
"""
GHOST_SPEED=1.0
def getLegalActions( state, ghostIndex ):
"""
Ghosts cannot stop, and cannot turn around unless they
reach a dead end, but can turn 90 degrees at intersections.
"""
conf = state.getGhostState( ghostIndex ).configuration
possibleActions = Actions.getPossibleActions( conf, state.data.layout.walls )
reverse = Actions.reverseDirection( conf.direction )
if Directions.STOP in possibleActions:
possibleActions.remove( Directions.STOP )
if reverse in possibleActions and len( possibleActions ) > 1:
possibleActions.remove( reverse )
return possibleActions
getLegalActions = staticmethod( getLegalActions )
def applyAction( state, action, ghostIndex):
legal = GhostRules.getLegalActions( state, ghostIndex )
if action not in legal:
raise Exception("Illegal ghost action " + str(action))
ghostState = state.data.agentStates[ghostIndex]
speed = GhostRules.GHOST_SPEED
if ghostState.scaredTimer > 0: speed /= 2.0
vector = Actions.directionToVector( action, speed )
ghostState.configuration = ghostState.configuration.generateSuccessor( vector )
applyAction = staticmethod( applyAction )
def decrementTimer( ghostState):
timer = ghostState.scaredTimer
if timer == 1:
ghostState.configuration.pos = nearestPoint( ghostState.configuration.pos )
ghostState.scaredTimer = max( 0, timer - 1 )
decrementTimer = staticmethod( decrementTimer )
def checkDeath( state, agentIndex):
pacmanPosition = state.getPacmanPosition()
if agentIndex == 0: # Pacman just moved; Anyone can kill him
for index in range( 1, len( state.data.agentStates ) ):
ghostState = state.data.agentStates[index]
ghostPosition = ghostState.configuration.getPosition()
if GhostRules.canKill( pacmanPosition, ghostPosition ):
GhostRules.collide( state, ghostState, index )
else:
ghostState = state.data.agentStates[agentIndex]
ghostPosition = ghostState.configuration.getPosition()
if GhostRules.canKill( pacmanPosition, ghostPosition ):
GhostRules.collide( state, ghostState, agentIndex )
checkDeath = staticmethod( checkDeath )
def collide( state, ghostState, agentIndex):
if ghostState.scaredTimer > 0:
state.data.scoreChange += 200
GhostRules.placeGhost(state, ghostState)
ghostState.scaredTimer = 0
# Added for first-person
state.data._eaten[agentIndex] = True
else:
if not state.data._win:
state.data.scoreChange -= 500
state.data._lose = True
collide = staticmethod( collide )
def canKill( pacmanPosition, ghostPosition ):
return manhattanDistance( ghostPosition, pacmanPosition ) <= COLLISION_TOLERANCE
canKill = staticmethod( canKill )
def placeGhost(state, ghostState):
ghostState.configuration = ghostState.start
placeGhost = staticmethod( placeGhost )
#############################
# FRAMEWORK TO START A GAME #
#############################
def default(str):
return str + ' [Default: %default]'
def parseAgentArgs(str):
if str == None: return {}
pieces = str.split(',')
opts = {}
for p in pieces:
if '=' in p:
key, val = p.split('=')
else:
key,val = p, 1
opts[key] = val
return opts
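# For illustration, parseAgentArgs turns the -a string into a kwargs dict;
# a bare key (no '=') defaults to the value 1:
#   parseAgentArgs('depth=3,alpha')  ->  {'depth': '3', 'alpha': 1}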
def readCommand( argv ):
"""
Processes the command used to run pacman from the command line.
"""
from optparse import OptionParser
usageStr = """
USAGE: python pacman.py <options>
EXAMPLES: (1) python pacman.py
- starts an interactive game
(2) python pacman.py --layout smallClassic --zoom 2
OR python pacman.py -l smallClassic -z 2
- starts an interactive game on a smaller board, zoomed in
"""
parser = OptionParser(usageStr)
parser.add_option('-n', '--numGames', dest='numGames', type='int',
help=default('the number of GAMES to play'), metavar='GAMES', default=1)
parser.add_option('-l', '--layout', dest='layout',
help=default('the LAYOUT_FILE from which to load the map layout'),
metavar='LAYOUT_FILE', default='mediumClassic')
parser.add_option('-p', '--pacman', dest='pacman',
help=default('the agent TYPE in the pacmanAgents module to use'),
metavar='TYPE', default='KeyboardAgent')
parser.add_option('-t', '--textGraphics', action='store_true', dest='textGraphics',
help='Display output as text only', default=False)
parser.add_option('-q', '--quietTextGraphics', action='store_true', dest='quietGraphics',
help='Generate minimal output and no graphics', default=False)
parser.add_option('-g', '--ghosts', dest='ghost',
help=default('the ghost agent TYPE in the ghostAgents module to use'),
metavar = 'TYPE', default='RandomGhost')
parser.add_option('-k', '--numghosts', type='int', dest='numGhosts',
help=default('The maximum number of ghosts to use'), default=4)
parser.add_option('-z', '--zoom', type='float', dest='zoom',
help=default('Zoom the size of the graphics window'), default=1.0)
parser.add_option('-f', '--fixRandomSeed', action='store_true', dest='fixRandomSeed',
help='Fixes the random seed to always play the same game', default=False)
parser.add_option('-r', '--recordActions', action='store_true', dest='record',
help='Writes game histories to a file (named by the time they were played)', default=False)
parser.add_option('--replay', dest='gameToReplay',
help='A recorded game file (pickle) to replay', default=None)
parser.add_option('-a','--agentArgs',dest='agentArgs',
help='Comma separated values sent to agent. e.g. "opt1=val1,opt2,opt3=val3"')
parser.add_option('-x', '--numTraining', dest='numTraining', type='int',
help=default('How many episodes are training (suppresses output)'), default=0)
parser.add_option('--frameTime', dest='frameTime', type='float',
help=default('Time to delay between frames; <0 means keyboard'), default=0.1)
parser.add_option('-c', '--catchExceptions', action='store_true', dest='catchExceptions',
help='Turns on exception handling and timeouts during games', default=False)
parser.add_option('--timeout', dest='timeout', type='int',
help=default('Maximum length of time an agent can spend computing in a single game'), default=30)
options, otherjunk = parser.parse_args(argv)
if len(otherjunk) != 0:
raise Exception('Command line input not understood: ' + str(otherjunk))
args = dict()
# Fix the random seed
if options.fixRandomSeed: random.seed('cs188')
# Choose a layout
args['layout'] = layout.getLayout( options.layout )
if args['layout'] == None: raise Exception("The layout " + options.layout + " cannot be found")
# Choose a Pacman agent
noKeyboard = options.gameToReplay == None and (options.textGraphics or options.quietGraphics)
pacmanType = loadAgent(options.pacman, noKeyboard)
agentOpts = parseAgentArgs(options.agentArgs)
if options.numTraining > 0:
args['numTraining'] = options.numTraining
if 'numTraining' not in agentOpts: agentOpts['numTraining'] = options.numTraining
pacman = pacmanType(**agentOpts) # Instantiate Pacman with agentArgs
args['pacman'] = pacman
# Don't display training games
if 'numTrain' in agentOpts:
options.numQuiet = int(agentOpts['numTrain'])
options.numIgnore = int(agentOpts['numTrain'])
# Choose a ghost agent
ghostType = loadAgent(options.ghost, noKeyboard)
args['ghosts'] = [ghostType( i+1 ) for i in range( options.numGhosts )]
# Choose a display format
if options.quietGraphics:
import textDisplay
args['display'] = textDisplay.NullGraphics()
elif options.textGraphics:
import textDisplay
textDisplay.SLEEP_TIME = options.frameTime
args['display'] = textDisplay.PacmanGraphics()
else:
import graphicsDisplay
args['display'] = graphicsDisplay.PacmanGraphics(options.zoom, frameTime = options.frameTime)
args['numGames'] = options.numGames
args['record'] = options.record
args['catchExceptions'] = options.catchExceptions
args['timeout'] = options.timeout
# Special case: recorded games don't use the runGames method or args structure
if options.gameToReplay != None:
print 'Replaying recorded game %s.' % options.gameToReplay
import cPickle
f = open(options.gameToReplay)
try: recorded = cPickle.load(f)
finally: f.close()
recorded['display'] = args['display']
replayGame(**recorded)
sys.exit(0)
return args
def loadAgent(pacman, nographics):
# Looks through all pythonPath Directories for the right module,
pythonPathStr = os.path.expandvars("$PYTHONPATH")
if pythonPathStr.find(';') == -1:
pythonPathDirs = pythonPathStr.split(':')
else:
pythonPathDirs = pythonPathStr.split(';')
pythonPathDirs.append('.')
for moduleDir in pythonPathDirs:
if not os.path.isdir(moduleDir): continue
moduleNames = [f for f in os.listdir(moduleDir) if f.endswith('gents.py')]
for modulename in moduleNames:
try:
module = __import__(modulename[:-3])
except ImportError:
continue
if pacman in dir(module):
if nographics and modulename == 'keyboardAgents.py':
raise Exception('Using the keyboard requires graphics (not text display)')
return getattr(module, pacman)
raise Exception('The agent ' + pacman + ' is not specified in any *Agents.py.')
def replayGame( layout, actions, display ):
import pacmanAgents, ghostAgents
rules = ClassicGameRules()
agents = [pacmanAgents.GreedyAgent()] + [ghostAgents.RandomGhost(i+1) for i in range(layout.getNumGhosts())]
game = rules.newGame( layout, agents[0], agents[1:], display )
state = game.state
display.initialize(state.data)
for action in actions:
# Execute the action
state = state.generateSuccessor( *action )
# Change the display
display.update( state.data )
# Allow for game specific conditions (winning, losing, etc.)
rules.process(state, game)
display.finish()
def runGames( layout, pacman, ghosts, display, numGames, record, numTraining = 0, catchExceptions=False, timeout=30 ):
import __main__
__main__.__dict__['_display'] = display
rules = ClassicGameRules(timeout)
games = []
for i in range( numGames ):
beQuiet = i < numTraining
if beQuiet:
# Suppress output and graphics
import textDisplay
gameDisplay = textDisplay.NullGraphics()
rules.quiet = True
else:
gameDisplay = display
rules.quiet = False
game = rules.newGame( layout, pacman, ghosts, gameDisplay, beQuiet, catchExceptions)
game.run()
if not beQuiet: games.append(game)
if record:
import time, cPickle
fname = ('recorded-game-%d' % (i + 1)) + '-'.join([str(t) for t in time.localtime()[1:6]])
f = file(fname, 'w')
components = {'layout': layout, 'actions': game.moveHistory}
cPickle.dump(components, f)
f.close()
if (numGames-numTraining) > 0:
scores = [game.state.getScore() for game in games]
wins = [game.state.isWin() for game in games]
winRate = wins.count(True)/ float(len(wins))
print 'Average Score:', sum(scores) / float(len(scores))
print 'Scores: ', ', '.join([str(score) for score in scores])
print 'Win Rate: %d/%d (%.2f)' % (wins.count(True), len(wins), winRate)
print 'Record: ', ', '.join([ ['Loss', 'Win'][int(w)] for w in wins])
return games
if __name__ == '__main__':
"""
The main function called when pacman.py is run
from the command line:
> python pacman.py
See the usage string for more details.
> python pacman.py --help
"""
args = readCommand( sys.argv[1:] ) # Get game components based on input
runGames( **args )
# import cProfile
# cProfile.run("runGames( **args )")
pass
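# Illustrative (commented-out) sketch of driving the framework from Python
# rather than the shell; the agent and layout names are examples only:
#   args = readCommand(['-l', 'smallClassic', '-p', 'GreedyAgent', '-q'])
#   games = runGames(**args)
#   print games[0].state.getScore()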
|
MarcosCommunity/odoo
|
refs/heads/marcos-8.0
|
addons/fleet/__init__.py
|
440
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import fleet
|
ajgallegog/gem5_arm
|
refs/heads/master
|
ext/ply/doc/makedoc.py
|
177
|
#!/usr/local/bin/python
###############################################################################
# Takes a chapter as input and adds internal links and numbering to all
# of the H1, H2, H3, H4 and H5 sections.
#
# Every heading HTML tag (H1, H2 etc) is given an autogenerated name to link
# to. However, if the name is not an autogenerated name from a previous run,
# it will be kept. If it is autogenerated, it might change on subsequent runs
# of this program. Thus if you want to create links to one of the headings,
# then change the heading link name to something that does not look like an
# autogenerated link name.
###############################################################################
import sys
import re
import string
###############################################################################
# Functions
###############################################################################
# Regexs for <a name="..."></a>
alink = re.compile(r"<a *name *= *\"(.*)\"></a>", re.IGNORECASE)
heading = re.compile(r"(_nn\d)", re.IGNORECASE)
def getheadingname(m):
    autogeneratedheading = True
if m.group(1) != None:
amatch = alink.match(m.group(1))
if amatch:
# A non-autogenerated heading - keep it
headingname = amatch.group(1)
autogeneratedheading = heading.match(headingname)
if autogeneratedheading:
# The heading name was either non-existent or autogenerated,
# We can create a new heading / change the existing heading
headingname = "%s_nn%d" % (filenamebase, nameindex)
return headingname
###############################################################################
# Main program
###############################################################################
if len(sys.argv) != 2:
print "usage: makedoc.py filename"
sys.exit(1)
filename = sys.argv[1]
filenamebase = string.split(filename,".")[0]
section = 0
subsection = 0
subsubsection = 0
subsubsubsection = 0
nameindex = 0
name = ""
# Regexs for <h1>,... <h5> sections
h1 = re.compile(r".*?<H1>(<a.*a>)*[\d\.\s]*(.*?)</H1>", re.IGNORECASE)
h2 = re.compile(r".*?<H2>(<a.*a>)*[\d\.\s]*(.*?)</H2>", re.IGNORECASE)
h3 = re.compile(r".*?<H3>(<a.*a>)*[\d\.\s]*(.*?)</H3>", re.IGNORECASE)
h4 = re.compile(r".*?<H4>(<a.*a>)*[\d\.\s]*(.*?)</H4>", re.IGNORECASE)
h5 = re.compile(r".*?<H5>(<a.*a>)*[\d\.\s]*(.*?)</H5>", re.IGNORECASE)
data = open(filename).read() # Read data
open(filename+".bak","w").write(data) # Make backup
lines = data.splitlines()
result = [ ] # This is the result of postprocessing the file
index = "<!-- INDEX -->\n<div class=\"sectiontoc\">\n" # index contains the index for adding at the top of the file. Also printed to stdout.
skip = 0
skipspace = 0
for s in lines:
if s == "<!-- INDEX -->":
if not skip:
result.append("@INDEX@")
skip = 1
else:
skip = 0
        continue
if skip:
continue
if not s and skipspace:
continue
if skipspace:
result.append("")
result.append("")
skipspace = 0
m = h2.match(s)
if m:
prevheadingtext = m.group(2)
nameindex += 1
section += 1
headingname = getheadingname(m)
result.append("""<H2><a name="%s"></a>%d. %s</H2>""" % (headingname,section, prevheadingtext))
if subsubsubsection:
index += "</ul>\n"
if subsubsection:
index += "</ul>\n"
if subsection:
index += "</ul>\n"
if section == 1:
index += "<ul>\n"
index += """<li><a href="#%s">%s</a>\n""" % (headingname,prevheadingtext)
subsection = 0
subsubsection = 0
subsubsubsection = 0
skipspace = 1
continue
m = h3.match(s)
if m:
prevheadingtext = m.group(2)
nameindex += 1
subsection += 1
headingname = getheadingname(m)
result.append("""<H3><a name="%s"></a>%d.%d %s</H3>""" % (headingname,section, subsection, prevheadingtext))
if subsubsubsection:
index += "</ul>\n"
if subsubsection:
index += "</ul>\n"
if subsection == 1:
index += "<ul>\n"
index += """<li><a href="#%s">%s</a>\n""" % (headingname,prevheadingtext)
subsubsection = 0
skipspace = 1
continue
m = h4.match(s)
if m:
prevheadingtext = m.group(2)
nameindex += 1
subsubsection += 1
subsubsubsection = 0
headingname = getheadingname(m)
result.append("""<H4><a name="%s"></a>%d.%d.%d %s</H4>""" % (headingname,section, subsection, subsubsection, prevheadingtext))
if subsubsubsection:
index += "</ul>\n"
if subsubsection == 1:
index += "<ul>\n"
index += """<li><a href="#%s">%s</a>\n""" % (headingname,prevheadingtext)
skipspace = 1
continue
m = h5.match(s)
if m:
prevheadingtext = m.group(2)
nameindex += 1
subsubsubsection += 1
headingname = getheadingname(m)
result.append("""<H5><a name="%s"></a>%d.%d.%d.%d %s</H5>""" % (headingname,section, subsection, subsubsection, subsubsubsection, prevheadingtext))
if subsubsubsection == 1:
index += "<ul>\n"
index += """<li><a href="#%s">%s</a>\n""" % (headingname,prevheadingtext)
skipspace = 1
continue
result.append(s)
if subsubsubsection:
index += "</ul>\n"
if subsubsection:
index += "</ul>\n"
if subsection:
index += "</ul>\n"
if section:
index += "</ul>\n"
index += "</div>\n<!-- INDEX -->\n"
data = "\n".join(result)
data = data.replace("@INDEX@",index) + "\n"
# Write the file back out
open(filename,"w").write(data)
|
bradleypj823/swift
|
refs/heads/master
|
swift/obj/mem_diskfile.py
|
10
|
# Copyright (c) 2010-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" In-Memory Disk File Interface for Swift Object Server"""
import time
import hashlib
from contextlib import contextmanager
from eventlet import Timeout
from six import moves
from swift.common.utils import Timestamp
from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist, \
DiskFileCollision, DiskFileDeleted, DiskFileNotOpen
from swift.common.swob import multi_range_iterator
class InMemoryFileSystem(object):
"""
A very simplistic in-memory file system scheme.
There is one dictionary mapping a given object name to a tuple. The first
    entry in the tuple is the cStringIO buffer representing the file contents,
the second entry is the metadata dictionary.
"""
def __init__(self):
self._filesystem = {}
def get_object(self, name):
val = self._filesystem.get(name)
if val is None:
data, metadata = None, None
else:
data, metadata = val
return data, metadata
def put_object(self, name, data, metadata):
self._filesystem[name] = (data, metadata)
def del_object(self, name):
del self._filesystem[name]
def get_diskfile(self, account, container, obj, **kwargs):
return DiskFile(self, account, container, obj)
def pickle_async_update(self, *args, **kwargs):
"""
For now don't handle async updates.
"""
pass
class DiskFileWriter(object):
"""
.. note::
Sample alternative pluggable on-disk backend implementation.
Encapsulation of the write context for servicing PUT REST API
requests. Serves as the context manager object for DiskFile's create()
method.
:param fs: internal file system object to use
:param name: standard object name
:param fp: `StringIO` in-memory representation object
"""
def __init__(self, fs, name, fp):
self._filesystem = fs
self._name = name
self._fp = fp
self._upload_size = 0
def write(self, chunk):
"""
Write a chunk of data into the `StringIO` object.
:param chunk: the chunk of data to write as a string object
"""
self._fp.write(chunk)
self._upload_size += len(chunk)
return self._upload_size
def put(self, metadata):
"""
Make the final association in the in-memory file system for this name
with the `StringIO` object.
:param metadata: dictionary of metadata to be written
:param extension: extension to be used when making the file
"""
metadata['name'] = self._name
self._filesystem.put_object(self._name, self._fp, metadata)
def commit(self, timestamp):
"""
Perform any operations necessary to mark the object as durable. For
mem_diskfile type this is a no-op.
:param timestamp: object put timestamp, an instance of
:class:`~swift.common.utils.Timestamp`
"""
pass
class DiskFileReader(object):
"""
.. note::
Sample alternative pluggable on-disk backend implementation.
Encapsulation of the read context for servicing GET REST API
requests. Serves as the context manager object for DiskFile's reader()
method.
:param name: object name
:param fp: open file object pointer reference
:param obj_size: on-disk size of object in bytes
:param etag: MD5 hash of object from metadata
"""
def __init__(self, name, fp, obj_size, etag):
self._name = name
self._fp = fp
self._obj_size = obj_size
self._etag = etag
#
self._iter_etag = None
self._bytes_read = 0
self._started_at_0 = False
self._read_to_eof = False
self._suppress_file_closing = False
#
self.was_quarantined = ''
def __iter__(self):
try:
self._bytes_read = 0
self._started_at_0 = False
self._read_to_eof = False
if self._fp.tell() == 0:
self._started_at_0 = True
self._iter_etag = hashlib.md5()
while True:
chunk = self._fp.read()
if chunk:
if self._iter_etag:
self._iter_etag.update(chunk)
self._bytes_read += len(chunk)
yield chunk
else:
self._read_to_eof = True
break
finally:
if not self._suppress_file_closing:
self.close()
def app_iter_range(self, start, stop):
if start or start == 0:
self._fp.seek(start)
if stop is not None:
length = stop - start
else:
length = None
try:
for chunk in self:
if length is not None:
length -= len(chunk)
if length < 0:
# Chop off the extra:
yield chunk[:length]
break
yield chunk
finally:
if not self._suppress_file_closing:
self.close()
def app_iter_ranges(self, ranges, content_type, boundary, size):
if not ranges:
yield ''
else:
try:
self._suppress_file_closing = True
for chunk in multi_range_iterator(
ranges, content_type, boundary, size,
self.app_iter_range):
yield chunk
finally:
self._suppress_file_closing = False
try:
self.close()
except DiskFileQuarantined:
pass
def _quarantine(self, msg):
self.was_quarantined = msg
def _handle_close_quarantine(self):
if self._bytes_read != self._obj_size:
self._quarantine(
"Bytes read: %s, does not match metadata: %s" % (
                    self._bytes_read, self._obj_size))
elif self._iter_etag and \
self._etag != self._iter_etag.hexdigest():
self._quarantine(
"ETag %s and file's md5 %s do not match" % (
self._etag, self._iter_etag.hexdigest()))
def close(self):
"""
Close the file. Will handle quarantining file if necessary.
"""
if self._fp:
try:
if self._started_at_0 and self._read_to_eof:
self._handle_close_quarantine()
except (Exception, Timeout):
pass
finally:
self._fp = None
class DiskFile(object):
"""
.. note::
Sample alternative pluggable on-disk backend implementation. This
example duck-types the reference implementation DiskFile class.
Manage object files in-memory.
:param mgr: DiskFileManager
:param device_path: path to the target device or drive
:param threadpool: thread pool to use for blocking operations
:param partition: partition on the device in which the object lives
:param account: account name for the object
:param container: container name for the object
:param obj: object name for the object
:param keep_cache: caller's preference for keeping data read in the cache
"""
def __init__(self, fs, account, container, obj):
self._name = '/' + '/'.join((account, container, obj))
self._metadata = None
self._fp = None
self._filesystem = fs
def open(self):
"""
Open the file and read the metadata.
This method must populate the _metadata attribute.
:raises DiskFileCollision: on name mis-match with metadata
:raises DiskFileDeleted: if it does not exist, or a tombstone is
present
:raises DiskFileQuarantined: if while reading metadata of the file
            some data did not pass cross checks
"""
fp, self._metadata = self._filesystem.get_object(self._name)
if fp is None:
raise DiskFileDeleted()
self._fp = self._verify_data_file(fp)
self._metadata = self._metadata or {}
return self
def __enter__(self):
if self._metadata is None:
raise DiskFileNotOpen()
return self
    def __exit__(self, t, v, tb):
        if self._fp is not None:
            self._fp = None
    def _quarantine(self, name, msg):
        # Minimal helper (absent from this excerpt, though callers below
        # use ``raise self._quarantine(...)``): return the exception to
        # be raised.
        return DiskFileQuarantined(msg)
def _verify_data_file(self, fp):
"""
Verify the metadata's name value matches what we think the object is
named.
:raises DiskFileCollision: if the metadata stored name does not match
the referenced name of the file
:raises DiskFileNotExist: if the object has expired
:raises DiskFileQuarantined: if data inconsistencies were detected
between the metadata and the file-system
metadata
"""
try:
mname = self._metadata['name']
except KeyError:
raise self._quarantine(self._name, "missing name metadata")
else:
if mname != self._name:
raise DiskFileCollision('Client path does not match path '
'stored in object metadata')
try:
x_delete_at = int(self._metadata['X-Delete-At'])
except KeyError:
pass
except ValueError:
# Quarantine, the x-delete-at key is present but not an
# integer.
raise self._quarantine(
self._name, "bad metadata x-delete-at value %s" % (
self._metadata['X-Delete-At']))
else:
if x_delete_at <= time.time():
raise DiskFileNotExist('Expired')
try:
metadata_size = int(self._metadata['Content-Length'])
except KeyError:
raise self._quarantine(
self._name, "missing content-length in metadata")
except ValueError:
# Quarantine, the content-length key is present but not an
# integer.
raise self._quarantine(
self._name, "bad metadata content-length value %s" % (
self._metadata['Content-Length']))
try:
fp.seek(0, 2)
obj_size = fp.tell()
fp.seek(0, 0)
except OSError as err:
# Quarantine, we can't successfully stat the file.
raise self._quarantine(self._name, "not stat-able: %s" % err)
if obj_size != metadata_size:
raise self._quarantine(
self._name, "metadata content-length %s does"
" not match actual object size %s" % (
metadata_size, obj_size))
return fp
def get_metadata(self):
"""
Provide the metadata for an object as a dictionary.
:returns: object's metadata dictionary
"""
if self._metadata is None:
raise DiskFileNotOpen()
return self._metadata
def read_metadata(self):
"""
Return the metadata for an object.
:returns: metadata dictionary for an object
"""
with self.open():
return self.get_metadata()
def reader(self, keep_cache=False):
"""
Return a swift.common.swob.Response class compatible "app_iter"
object. The responsibility of closing the open file is passed to the
DiskFileReader object.
:param keep_cache:
"""
dr = DiskFileReader(self._name, self._fp,
int(self._metadata['Content-Length']),
self._metadata['ETag'])
# At this point the reader object is now responsible for
# the file pointer.
self._fp = None
return dr
@contextmanager
def create(self, size=None):
"""
Context manager to create a file. We create a temporary file first, and
then return a DiskFileWriter object to encapsulate the state.
:param size: optional initial size of file to explicitly allocate on
disk
:raises DiskFileNoSpace: if a size is specified and allocation fails
"""
fp = moves.cStringIO()
try:
yield DiskFileWriter(self._filesystem, self._name, fp)
finally:
del fp
    def write_metadata(self, metadata):
        """
        Write a block of metadata to an object.
        """
        # InMemoryFileSystem is not dict-like; go through its
        # get_object() / put_object() API.
        data, cur_md = self._filesystem.get_object(self._name)
        if data is not None:
            self._filesystem.put_object(self._name, data, metadata)
def delete(self, timestamp):
"""
Perform a delete for the given object in the given container under the
given account.
This creates a tombstone file with the given timestamp, and removes
any older versions of the object file. Any file that has an older
timestamp than timestamp will be deleted.
:param timestamp: timestamp to compare with each file
"""
fp, md = self._filesystem.get_object(self._name)
if md and md['X-Timestamp'] < Timestamp(timestamp):
self._filesystem.del_object(self._name)
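# ---------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module; py2-era,
# like the code above): write an object through the in-memory backend
# and read its metadata back.
if __name__ == '__main__':
    fs = InMemoryFileSystem()
    df = fs.get_diskfile('AUTH_test', 'cont', 'obj')
    with df.create() as writer:
        writer.write('hello')
        writer.put({'X-Timestamp': Timestamp(time.time()).internal,
                    'Content-Length': '5',
                    'ETag': hashlib.md5('hello').hexdigest()})
    with df.open() as opened:
        print(opened.get_metadata()['name'])   # -> /AUTH_test/cont/obj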
|
tiefpunkt/thingstore
|
refs/heads/master
|
ThingStoreProject/settings.py
|
1
|
# Django settings for ThingStoreProject project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'devel.db', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Berlin'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '@h@s$d6=h#vz36d8z+$vd(ka$b(4&v6&286pg51xx3e7d23rco'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'ThingStoreProject.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'ThingStoreProject.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'south',
'thingstore',
'tastypie',
'django_extensions',
'registration'
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',
'thingstore.auth.APIKeyBackend',)
TEMPLATE_CONTEXT_PROCESSORS = ('django.core.context_processors.request',
'django.contrib.auth.context_processors.auth',)
# Settings for the registration module
ACCOUNT_ACTIVATION_DAYS = 7
LOGIN_REDIRECT_URL = "/"
PASSWORD_MIN_LENGTH = 8
|
louietsai/python-for-android
|
refs/heads/master
|
python-modules/zope/zope/interface/interfaces.py
|
50
|
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Interface Package Interfaces
$Id: interfaces.py 110699 2010-04-09 08:16:17Z regebro $
"""
__docformat__ = 'restructuredtext'
from zope.interface import Interface
from zope.interface.interface import Attribute
class IElement(Interface):
"""Objects that have basic documentation and tagged values.
"""
__name__ = Attribute('__name__', 'The object name')
__doc__ = Attribute('__doc__', 'The object doc string')
def getTaggedValue(tag):
"""Returns the value associated with `tag`.
        Raise a `KeyError` if the tag isn't set.
"""
def queryTaggedValue(tag, default=None):
"""Returns the value associated with `tag`.
        Return the default value if the tag isn't set.
"""
def getTaggedValueTags():
"""Returns a list of all tags."""
def setTaggedValue(tag, value):
"""Associates `value` with `key`."""
class IAttribute(IElement):
"""Attribute descriptors"""
interface = Attribute('interface',
'Stores the interface instance in which the '
'attribute is located.')
class IMethod(IAttribute):
"""Method attributes"""
def getSignatureInfo():
"""Returns the signature information.
This method returns a dictionary with the following keys:
o `positional` - All positional arguments.
o `required` - A list of all required arguments.
o `optional` - A list of all optional arguments.
o `varargs` - The name of the varargs argument.
o `kwargs` - The name of the kwargs argument.
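        For example, for ``def func(a, b, c=1, **kw)`` the method might
        return (illustrative only)::
          {'positional': ['a', 'b', 'c'], 'required': ['a', 'b'],
           'optional': {'c': 1}, 'varargs': None, 'kwargs': 'kw'}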
"""
def getSignatureString():
"""Return a signature string suitable for inclusion in documentation.
This method returns the function signature string. For example, if you
have `func(a, b, c=1, d='f')`, then the signature string is `(a, b,
c=1, d='f')`.
"""
class ISpecification(Interface):
"""Object Behavioral specifications"""
def extends(other, strict=True):
"""Test whether a specification extends another
The specification extends other if it has other as a base
        interface or if one of its bases extends other.
If strict is false, then the specification extends itself.
"""
def isOrExtends(other):
"""Test whether the specification is or extends another
"""
def weakref(callback=None):
"""Return a weakref to the specification
This method is, regrettably, needed to allow weakrefs to be
computed to security-proxied specifications. While the
zope.interface package does not require zope.security or
zope.proxy, it has to be able to coexist with it.
"""
__bases__ = Attribute("""Base specifications
    A tuple of specifications from which this specification is
directly derived.
""")
__sro__ = Attribute("""Specification-resolution order
    A tuple of the specification and all of its ancestor
specifications from most specific to least specific.
(This is similar to the method-resolution order for new-style classes.)
""")
__iro__ = Attribute("""Interface-resolution order
    A tuple of the specification's ancestor interfaces from
most specific to least specific. The specification itself is
included if it is an interface.
(This is similar to the method-resolution order for new-style classes.)
""")
def get(name, default=None):
"""Look up the description for a name
If the named attribute is not defined, the default is
returned.
"""
class IInterface(ISpecification, IElement):
"""Interface objects
Interface objects describe the behavior of an object by containing
useful information about the object. This information includes:
o Prose documentation about the object. In Python terms, this
is called the "doc string" of the interface. In this element,
you describe how the object works in prose language and any
other useful information about the object.
o Descriptions of attributes. Attribute descriptions include
the name of the attribute and prose documentation describing
      the attribute's usage.
o Descriptions of methods. Method descriptions can include:
- Prose "doc string" documentation about the method and its
usage.
      - A description of the method's arguments; how many arguments
are expected, optional arguments and their default values,
        the position of arguments in the signature, whether the
method accepts arbitrary arguments and whether the method
accepts arbitrary keyword arguments.
o Optional tagged data. Interface objects (and their attributes and
methods) can have optional, application specific tagged data
associated with them. Example uses for this are examples,
security assertions, pre/post conditions, and other possible
information you may want to associate with an Interface or its
attributes.
Not all of this information is mandatory. For example, you may
only want the methods of your interface to have prose
documentation and not describe the arguments of the method in
exact detail. Interface objects are flexible and let you give or
take any of these components.
Interfaces are created with the Python class statement using
either zope.interface.Interface or another interface, as in::
from zope.interface import Interface
class IMyInterface(Interface):
'''Interface documentation'''
def meth(arg1, arg2):
'''Documentation for meth'''
# Note that there is no self argument
class IMySubInterface(IMyInterface):
'''Interface documentation'''
def meth2():
'''Documentation for meth2'''
You use interfaces in two ways:
o You assert that your objects implement the interfaces.
There are several ways that you can assert that an object
implements an interface:
1. Call zope.interface.implements in your class definition.
2. Call zope.interface.directlyProvides on your object.
3. Call 'zope.interface.classImplements' to assert that instances
of a class implement an interface.
For example::
from zope.interface import classImplements
classImplements(some_class, some_interface)
This approach is useful when it is not an option to modify
the class source. Note that this doesn't affect what the
class itself implements, but only what its instances
implement.
o You query interface meta-data. See the IInterface methods and
attributes for details.
"""
def providedBy(object):
"""Test whether the interface is implemented by the object
Return true if the object asserts that it implements the
interface, including asserting that it implements an extended
interface.
"""
def implementedBy(class_):
"""Test whether the interface is implemented by instances of the class
Return true if the class asserts that its instances implement the
interface, including asserting that they implement an extended
interface.
"""
def names(all=False):
"""Get the interface attribute names
Return a sequence of the names of the attributes, including
methods, included in the interface definition.
Normally, only directly defined attributes are included. If
a true positional or keyword argument is given, then
attributes defined by base classes will be included.
"""
def namesAndDescriptions(all=False):
"""Get the interface attribute names and descriptions
Return a sequence of the names and descriptions of the
attributes, including methods, as name-value pairs, included
in the interface definition.
Normally, only directly defined attributes are included. If
a true positional or keyword argument is given, then
attributes defined by base classes will be included.
"""
def __getitem__(name):
"""Get the description for a name
If the named attribute is not defined, a KeyError is raised.
"""
def direct(name):
"""Get the description for the name if it was defined by the interface
If the interface doesn't define the name, returns None.
"""
def validateInvariants(obj, errors=None):
"""Validate invariants
Validate the object against the defined invariants. If errors is None,
raises first Invalid error; if errors is a list, appends all errors
to list, then raises Invalid with the errors as the first element
of the "args" tuple."""
def __contains__(name):
"""Test whether the name is defined by the interface"""
def __iter__():
"""Return an iterator over the names defined by the interface
The names iterated include all of the names defined by the
interface directly and indirectly by base interfaces.
"""
__module__ = Attribute("""The name of the module defining the interface""")
class IDeclaration(ISpecification):
"""Interface declaration
Declarations are used to express the interfaces implemented by
classes or provided by objects.
"""
def __contains__(interface):
"""Test whether an interface is in the specification
Return true if the given interface is one of the interfaces in
the specification and false otherwise.
"""
def __iter__():
"""Return an iterator for the interfaces in the specification
"""
def flattened():
"""Return an iterator of all included and extended interfaces
An iterator is returned for all interfaces either included in
or extended by interfaces included in the specifications
without duplicates. The interfaces are in "interface
resolution order". The interface resolution order is such that
base interfaces are listed after interfaces that extend them
and, otherwise, interfaces are included in the order that they
were defined in the specification.
"""
def __sub__(interfaces):
"""Create an interface specification with some interfaces excluded
The argument can be an interface or an interface
specification. The interface or interfaces given in a
specification are subtracted from the interface specification.
Removing an interface that is not in the specification does
not raise an error. Doing so has no effect.
Removing an interface also removes sub-interfaces of the interface.
"""
def __add__(interfaces):
"""Create an interface specification with some interfaces added
The argument can be an interface or an interface
specification. The interface or interfaces given in a
specification are added to the interface specification.
Adding an interface that is already in the specification does
not raise an error. Doing so has no effect.
"""
def __nonzero__():
"""Return a true value of the interface specification is non-empty
"""
class IInterfaceDeclaration(Interface):
"""Declare and check the interfaces of objects
The functions defined in this interface are used to declare the
interfaces that objects provide and to query the interfaces that have
been declared.
Interfaces can be declared for objects in two ways:
- Interfaces are declared for instances of the object's class
- Interfaces are declared for the object directly.
The interfaces declared for an object are, therefore, the union of
interfaces declared for the object directly and the interfaces
declared for instances of the object's class.
Note that we say that a class implements the interfaces provided
by its instances. An instance can also provide interfaces
directly. The interfaces provided by an object are the union of
the interfaces provided directly and the interfaces implemented by
the class.
"""
def providedBy(ob):
"""Return the interfaces provided by an object
This is the union of the interfaces directly provided by an
object and interfaces implemented by its class.
The value returned is an IDeclaration.
"""
def implementedBy(class_):
"""Return the interfaces implemented for a class' instances
The value returned is an IDeclaration.
"""
def classImplements(class_, *interfaces):
"""Declare additional interfaces implemented for instances of a class
The arguments after the class are one or more interfaces or
interface specifications (IDeclaration objects).
The interfaces given (including the interfaces in the
specifications) are added to any interfaces previously
declared.
Consider the following example::
class C(A, B):
...
classImplements(C, I1, I2)
Instances of ``C`` provide ``I1``, ``I2``, and whatever interfaces
instances of ``A`` and ``B`` provide.
"""
def implementer(*interfaces):
"""Create a decorator for declaring interfaces implemented by a facory
A callable is returned that makes an implements declaration on
objects passed to it.
"""
def classImplementsOnly(class_, *interfaces):
"""Declare the only interfaces implemented by instances of a class
The arguments after the class are one or more interfaces or
interface specifications (IDeclaration objects).
The interfaces given (including the interfaces in the
specifications) replace any previous declarations.
Consider the following example::
class C(A, B):
...
classImplements(C, IA, IB, IC)
classImplementsOnly(C, I1, I2)
Instances of ``C`` provide only ``I1`` and ``I2``, regardless of
what interfaces instances of ``A`` and ``B`` implement.
"""
def implementer_only(*interfaces):
"""Create a decorator for declaring the only interfaces implemented
A callable is returned that makes an implements declaration on
objects passed to it.
"""
def directlyProvidedBy(object):
"""Return the interfaces directly provided by the given object
The value returned is an IDeclaration.
"""
def directlyProvides(object, *interfaces):
"""Declare interfaces declared directly for an object
The arguments after the object are one or more interfaces or
interface specifications (IDeclaration objects).
The interfaces given (including the interfaces in the
specifications) replace interfaces previously
declared for the object.
Consider the following example::
class C(A, B):
...
ob = C()
directlyProvides(ob, I1, I2)
The object ``ob`` provides ``I1``, ``I2``, and whatever interfaces
have been declared for instances of ``C``.
To remove directly provided interfaces, use ``directlyProvidedBy`` and
subtract the unwanted interfaces. For example::
directlyProvides(ob, directlyProvidedBy(ob)-I2)
removes I2 from the interfaces directly provided by
``ob``. The object, ``ob`` no longer directly provides ``I2``,
although it might still provide ``I2`` if its class
implements ``I2``.
To add directly provided interfaces, use ``directlyProvidedBy`` and
include additional interfaces. For example::
directlyProvides(ob, directlyProvidedBy(ob), I2)
adds I2 to the interfaces directly provided by ob.
"""
def alsoProvides(object, *interfaces):
"""Declare additional interfaces directly for an object::
alsoProvides(ob, I1)
is equivalent to::
directlyProvides(ob, directlyProvidedBy(ob), I1)
"""
def noLongerProvides(object, interface):
"""Remove an interface from the list of an object's directly
provided interfaces::
noLongerProvides(ob, I1)
is equivalent to::
directlyProvides(ob, directlyProvidedBy(ob)-I1)
with the exception that if ``I1`` is an interface that is
provided by ``ob`` through the class's implementation,
ValueError is raised.
"""
def implements(*interfaces):
"""Declare interfaces implemented by instances of a class
This function is called in a class definition.
The arguments are one or more interfaces or interface
specifications (IDeclaration objects).
The interfaces given (including the interfaces in the
specifications) are added to any interfaces previously
declared.
Previous declarations include declarations for base classes
unless implementsOnly was used.
This function is provided for convenience. It provides a more
convenient way to call classImplements. For example::
implements(I1)
is equivalent to calling::
classImplements(C, I1)
after the class has been created.
Consider the following example::
class C(A, B):
implements(I1, I2)
Instances of ``C`` implement ``I1``, ``I2``, and whatever interfaces
instances of ``A`` and ``B`` implement.
"""
def implementsOnly(*interfaces):
"""Declare the only interfaces implemented by instances of a class
This function is called in a class definition.
The arguments are one or more interfaces or interface
specifications (IDeclaration objects).
Previous declarations including declarations for base classes
are overridden.
This function is provided for convenience. It provides a more
convenient way to call classImplementsOnly. For example::
implementsOnly(I1)
is equivalent to calling::
classImplementsOnly(I1)
after the class has been created.
Consider the following example::
class C(A, B):
implementsOnly(I1, I2)
Instances of ``C`` implement ``I1``, ``I2``, regardless of what
instances of ``A`` and ``B`` implement.
"""
def classProvides(*interfaces):
"""Declare interfaces provided directly by a class
This function is called in a class definition.
The arguments are one or more interfaces or interface
specifications (IDeclaration objects).
The given interfaces (including the interfaces in the
specifications) are used to create the class's direct-object
interface specification. An error will be raised if the class
already has a direct interface specification. In other words, it is
an error to call this function more than once in a class
definition.
Note that the given interfaces have nothing to do with the
interfaces implemented by instances of the class.
This function is provided for convenience. It provides a more
convenient way to call directlyProvides for a class. For example::
classProvides(I1)
is equivalent to calling::
directlyProvides(theclass, I1)
after the class has been created.
"""
def provider(*interfaces):
"""A class decorator version of classProvides"""
def moduleProvides(*interfaces):
"""Declare interfaces provided by a module
This function is used in a module definition.
The arguments are one or more interfaces or interface
specifications (IDeclaration objects).
The given interfaces (including the interfaces in the
specifications) are used to create the module's direct-object
interface specification. An error will be raised if the module
already has an interface specification. In other words, it is
an error to call this function more than once in a module
definition.
This function is provided for convenience. It provides a more
convenient way to call directlyProvides for a module. For example::
moduleProvides(I1)
is equivalent to::
directlyProvides(sys.modules[__name__], I1)
"""
def Declaration(*interfaces):
"""Create an interface specification
The arguments are one or more interfaces or interface
specifications (IDeclaration objects).
A new interface specification (IDeclaration) with
the given interfaces is returned.
"""
class IAdapterRegistry(Interface):
"""Provide an interface-based registry for adapters
This registry registers objects that are in some sense "from" a
sequence of specifications to an interface and a name.
No specific semantics are assumed for the registered objects,
however, the most common application will be to register factories
that adapt objects providing required specifications to a provided
interface.
"""
def register(required, provided, name, value):
"""Register a value
A value is registered for a *sequence* of required specifications, a
provided interface, and a name.
"""
def registered(required, provided, name=u''):
"""Return the component registered for the given interfaces and name
Unlike the lookup method, this method won't retrieve
components registered for more specific required interfaces or
less specific provided interfaces.
If no component was registered exactly for the given
interfaces and name, then None is returned.
"""
def lookup(required, provided, name='', default=None):
"""Lookup a value
A value is looked up based on a *sequence* of required
specifications, a provided interface, and a name.
"""
def queryMultiAdapter(objects, provided, name=u'', default=None):
"""Adapt a sequence of objects to a named, provided, interface
"""
def lookup1(required, provided, name=u'', default=None):
"""Lookup a value using a single required interface
A value is looked up based on a single required
specification, a provided interface, and a name.
"""
def queryAdapter(object, provided, name=u'', default=None):
"""Adapt an object using a registered adapter factory.
"""
def adapter_hook(provided, object, name=u'', default=None):
"""Adapt an object using a registered adapter factory.
"""
def lookupAll(required, provided):
"""Find all adapters from the required to the provided interfaces
An iterable object is returned that provides name-value two-tuples.
"""
def names(required, provided):
"""Return the names for which there are registered objects
"""
def subscribe(required, provided, subscriber, name=u''):
"""Register a subscriber
A subscriber is registered for a *sequence* of required
specifications, a provided interface, and a name.
Multiple subscribers may be registered for the same (or
equivalent) interfaces.
"""
def subscriptions(required, provided, name=u''):
"""Get a sequence of subscribers
Subscribers for a *sequence* of required interfaces, and a provided
interface are returned.
"""
def subscribers(objects, provided, name=u''):
"""Get a sequence of subscription adapters
"""
|
jsoref/django
|
refs/heads/master
|
django/db/models/sql/where.py
|
439
|
"""
Code to manage the creation and SQL rendering of 'where' constraints.
"""
from django.db.models.sql.datastructures import EmptyResultSet
from django.utils import tree
from django.utils.functional import cached_property
# Connection types
AND = 'AND'
OR = 'OR'
class WhereNode(tree.Node):
"""
Used to represent the SQL where-clause.
The class is tied to the Query class that created it (in order to create
the correct SQL).
A child is usually an expression producing boolean values. Most likely the
expression is a Lookup instance.
However, a child could also be any class with an as_sql() method and
either a relabeled_clone() method or relabel_aliases() and clone()
methods, plus a contains_aggregate attribute.
"""
default = AND
def split_having(self, negated=False):
"""
Returns two possibly None nodes: one for those parts of self that
should be included in the WHERE clause and one for those parts of
self that must be included in the HAVING clause.
"""
if not self.contains_aggregate:
return self, None
in_negated = negated ^ self.negated
# If the effective connector is OR and this node contains an aggregate,
# then we need to push the whole branch to HAVING clause.
may_need_split = (
(in_negated and self.connector == AND) or
(not in_negated and self.connector == OR))
if may_need_split and self.contains_aggregate:
return None, self
where_parts = []
having_parts = []
for c in self.children:
if hasattr(c, 'split_having'):
where_part, having_part = c.split_having(in_negated)
if where_part is not None:
where_parts.append(where_part)
if having_part is not None:
having_parts.append(having_part)
elif c.contains_aggregate:
having_parts.append(c)
else:
where_parts.append(c)
having_node = self.__class__(having_parts, self.connector, self.negated) if having_parts else None
where_node = self.__class__(where_parts, self.connector, self.negated) if where_parts else None
return where_node, having_node
def as_sql(self, compiler, connection):
"""
Returns the SQL version of the where clause and the value to be
substituted in. Returns '', [] if this node matches everything,
None, [] if this node is empty, and raises EmptyResultSet if this
node can't match anything.
"""
result = []
result_params = []
if self.connector == AND:
full_needed, empty_needed = len(self.children), 1
else:
full_needed, empty_needed = 1, len(self.children)
for child in self.children:
try:
sql, params = compiler.compile(child)
except EmptyResultSet:
empty_needed -= 1
else:
if sql:
result.append(sql)
result_params.extend(params)
else:
full_needed -= 1
# Check whether this node matches nothing or everything, using the
# counts of how many more full/empty children would be needed.
if empty_needed == 0:
if self.negated:
return '', []
else:
raise EmptyResultSet
if full_needed == 0:
if self.negated:
raise EmptyResultSet
else:
return '', []
conn = ' %s ' % self.connector
sql_string = conn.join(result)
if sql_string:
if self.negated:
# Some backends (Oracle at least) need parentheses
# around the inner SQL in the negated case, even if the
# inner SQL contains just a single expression.
sql_string = 'NOT (%s)' % sql_string
elif len(result) > 1:
sql_string = '(%s)' % sql_string
return sql_string, result_params
def get_group_by_cols(self):
cols = []
for child in self.children:
cols.extend(child.get_group_by_cols())
return cols
def relabel_aliases(self, change_map):
"""
Relabels the alias values of any children. 'change_map' is a dictionary
mapping old (current) alias values to the new values.
"""
for pos, child in enumerate(self.children):
if hasattr(child, 'relabel_aliases'):
# For example another WhereNode
child.relabel_aliases(change_map)
elif hasattr(child, 'relabeled_clone'):
self.children[pos] = child.relabeled_clone(change_map)
def clone(self):
"""
Creates a clone of the tree. Must only be called on root nodes (nodes
with empty subtree_parents). Children must be either (Constraint, lookup,
value) tuples, or objects supporting .clone().
"""
clone = self.__class__._new_instance(
children=[], connector=self.connector, negated=self.negated)
for child in self.children:
if hasattr(child, 'clone'):
clone.children.append(child.clone())
else:
clone.children.append(child)
return clone
def relabeled_clone(self, change_map):
clone = self.clone()
clone.relabel_aliases(change_map)
return clone
@classmethod
def _contains_aggregate(cls, obj):
if isinstance(obj, tree.Node):
return any(cls._contains_aggregate(c) for c in obj.children)
return obj.contains_aggregate
@cached_property
def contains_aggregate(self):
return self._contains_aggregate(self)
class NothingNode(object):
"""
A node that matches nothing.
"""
contains_aggregate = False
def as_sql(self, compiler=None, connection=None):
raise EmptyResultSet
class ExtraWhere(object):
# The contents are a black box - assume no aggregates are used.
contains_aggregate = False
def __init__(self, sqls, params):
self.sqls = sqls
self.params = params
def as_sql(self, compiler=None, connection=None):
sqls = ["(%s)" % sql for sql in self.sqls]
return " AND ".join(sqls), list(self.params or ())
class SubqueryConstraint(object):
# Even if aggregates would be used in a subquery, the outer query isn't
# interested about those.
contains_aggregate = False
def __init__(self, alias, columns, targets, query_object):
self.alias = alias
self.columns = columns
self.targets = targets
self.query_object = query_object
def as_sql(self, compiler, connection):
query = self.query_object
# QuerySet was sent
if hasattr(query, 'values'):
if query._db and connection.alias != query._db:
raise ValueError("Can't do subqueries with queries on different DBs.")
# Do not override already existing values.
if query._fields is None:
query = query.values(*self.targets)
else:
query = query._clone()
query = query.query
if query.can_filter():
# If there is no slicing in use, then we can safely drop all ordering
query.clear_ordering(True)
query_compiler = query.get_compiler(connection=connection)
return query_compiler.as_subquery_condition(self.alias, self.columns, compiler)
def relabel_aliases(self, change_map):
self.alias = change_map.get(self.alias, self.alias)
def clone(self):
return self.__class__(
self.alias, self.columns, self.targets,
self.query_object)
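# A minimal smoke test, not part of the original module; running it assumes
# Django is importable, since the imports at the top of this file require it.
# ExtraWhere simply parenthesizes each raw SQL fragment and joins with AND.
if __name__ == "__main__":
    sql, params = ExtraWhere(["a = %s", "b = %s"], [1, 2]).as_sql()
    assert sql == "(a = %s) AND (b = %s)"
    assert params == [1, 2]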
|
portableant/open-context-py
|
refs/heads/master
|
opencontext_py/apps/edit/views.py
|
2
|
import json
from django.http import HttpResponse, Http404
from django.template import RequestContext, loader
from opencontext_py.apps.ocitems.ocitem.models import OCitem
from opencontext_py.apps.ocitems.ocitem.templating import TemplateItem
|
alxgu/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/openstack/os_coe_cluster_template.py
|
31
|
#!/usr/bin/python
# Copyright (c) 2018 Catalyst IT Ltd.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_coe_cluster_template
short_description: Add/Remove COE cluster template from OpenStack Cloud
extends_documentation_fragment: openstack
version_added: "2.7"
author: "Feilong Wang (@flwang)"
description:
- Add or Remove COE cluster template from the OpenStack Container Infra
service.
options:
availability_zone:
description:
- Ignored. Present for backwards compatibility
coe:
description:
- The Container Orchestration Engine for this clustertemplate
choices: [kubernetes, swarm, mesos]
dns_nameserver:
description:
- The DNS nameserver address
default: '8.8.8.8'
docker_storage_driver:
description:
- Docker storage driver
choices: [devicemapper, overlay]
docker_volume_size:
description:
- The size in GB of the docker volume
external_network_id:
description:
- The external network to attach to the Cluster
fixed_network:
description:
- The fixed network name to attach to the Cluster
fixed_subnet:
description:
- The fixed subnet name to attach to the Cluster
flavor_id:
description:
- The flavor of the minion node for this ClusterTemplate
floating_ip_enabled:
description:
- Indicates whether created clusters should have a floating ip or not
type: bool
default: 'yes'
keypair_id:
description:
- Name or ID of the keypair to use.
image_id:
description:
- Image id the cluster will be based on
labels:
description:
- One or more key/value pairs
http_proxy:
description:
- Address of a proxy that will receive all HTTP requests and relay them.
The format is a URL including a port number.
https_proxy:
description:
- Address of a proxy that will receive all HTTPS requests and relay
them. The format is a URL including a port number.
master_flavor_id:
description:
- The flavor of the master node for this ClusterTemplate
master_lb_enabled:
description:
- Indicates whether created clusters should have a load balancer
for master nodes or not
type: bool
default: 'no'
name:
description:
- Name that has to be given to the cluster template
required: true
network_driver:
description:
- The name of the driver used for instantiating container networks
choices: [flannel, calico, docker]
no_proxy:
description:
- A comma separated list of IPs for which proxies should not be
used in the cluster
public:
description:
- Indicates whether the ClusterTemplate is public or not
type: bool
default: 'no'
registry_enabled:
description:
- Indicates whether the docker registry is enabled
type: bool
default: 'no'
server_type:
description:
- Server type for this ClusterTemplate
choices: [vm, bm]
default: vm
state:
description:
- Indicate desired state of the resource.
choices: [present, absent]
default: present
tls_disabled:
description:
- Indicates whether the TLS should be disabled
type: bool
default: 'no'
volume_driver:
description:
- The name of the driver used for instantiating container volumes
choices: [cinder, rexray]
requirements: ["openstacksdk"]
'''
RETURN = '''
id:
description: The cluster UUID.
returned: On success when I(state) is 'present'
type: str
sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
cluster_template:
description: Dictionary describing the template.
returned: On success when I(state) is 'present'
type: complex
contains:
coe:
description: The Container Orchestration Engine for this clustertemplate
type: str
sample: kubernetes
dns_nameserver:
description: The DNS nameserver address
type: str
sample: '8.8.8.8'
docker_storage_driver:
description: Docker storage driver
type: str
sample: devicemapper
docker_volume_size:
description: The size in GB of the docker volume
type: int
sample: 5
external_network_id:
description: The external network to attach to the Cluster
type: str
sample: public
fixed_network:
description: The fixed network name to attach to the Cluster
type: str
sample: 07767ec6-85f5-44cb-bd63-242a8e7f0d9d
fixed_subnet:
description:
- The fixed subnet name to attach to the Cluster
type: str
sample: 05567ec6-85f5-44cb-bd63-242a8e7f0d9d
flavor_id:
description:
- The flavor of the minion node for this ClusterTemplate
type: str
sample: c1.c1r1
floating_ip_enabled:
description:
- Indicates whether created clusters should have a floating ip or not
type: bool
default: true
keypair_id:
description:
- Name or ID of the keypair to use.
type: str
sample: mykey
image_id:
description:
- Image id the cluster will be based on
type: str
sample: 05567ec6-85f5-44cb-bd63-242a8e7f0e9d
labels:
description: One or more key/value pairs
type: dict
sample: {'key1': 'value1', 'key2': 'value2'}
http_proxy:
description:
- Address of a proxy that will receive all HTTP requests and relay them.
The format is a URL including a port number.
type: str
sample: http://10.0.0.11:9090
https_proxy:
description:
- Address of a proxy that will receive all HTTPS requests and relay
them. The format is a URL including a port number.
type: str
sample: https://10.0.0.10:8443
master_flavor_id:
description:
- The flavor of the master node for this ClusterTemplate
type: str
sample: c1.c1r1
master_lb_enabled:
description:
- Indicates whether created clusters should have a load balancer
for master nodes or not
type: bool
sample: true
name:
description:
- Name that has to be given to the cluster template
type: str
sample: k8scluster
network_driver:
description:
- The name of the driver used for instantiating container networks
type: str
sample: calico
no_proxy:
description:
- A comma separated list of IPs for which proxies should not be
used in the cluster
type: str
sample: 10.0.0.4,10.0.0.5
public:
description:
- Indicates whether the ClusterTemplate is public or not
type: bool
sample: false
registry_enabled:
description:
- Indicates whether the docker registry is enabled
type: bool
sample: false
server_type:
description:
- Server type for this ClusterTemplate
type: str
sample: vm
tls_disabled:
description:
- Indicates whether the TLS should be disabled
type: bool
sample: false
volume_driver:
description:
- The name of the driver used for instantiating container volumes
type: str
sample: cinder
'''
EXAMPLES = '''
# Create a new Kubernetes cluster template
- os_coe_cluster_template:
name: k8s
coe: kubernetes
keypair_id: mykey
image_id: 2a8c9888-9054-4b06-a1ca-2bb61f9adb72
public: no
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _parse_labels(labels):
if isinstance(labels, str):
labels_dict = {}
for kv_str in labels.split(","):
k, v = kv_str.split("=")
labels_dict[k] = v
return labels_dict
if not labels:
return {}
return labels
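# Illustrative values (hypothetical inputs, not from the module): a
# comma-separated "key=value" string becomes a dict, None becomes {}, and a
# dict passes through unchanged:
#     _parse_labels("k1=v1,k2=v2")  ->  {'k1': 'v1', 'k2': 'v2'}
#     _parse_labels(None)           ->  {}
#     _parse_labels({'a': 'b'})     ->  {'a': 'b'}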
def main():
argument_spec = openstack_full_argument_spec(
coe=dict(required=True, choices=['kubernetes', 'swarm', 'mesos']),
dns_nameserver=dict(default='8.8.8.8'),
docker_storage_driver=dict(choices=['devicemapper', 'overlay']),
docker_volume_size=dict(type='int'),
external_network_id=dict(default=None),
fixed_network=dict(default=None),
fixed_subnet=dict(default=None),
flavor_id=dict(default=None),
floating_ip_enabled=dict(type='bool', default=True),
keypair_id=dict(default=None),
image_id=dict(required=True),
labels=dict(default=None, type='raw'),
http_proxy=dict(default=None),
https_proxy=dict(default=None),
master_lb_enabled=dict(type='bool', default=False),
master_flavor_id=dict(default=None),
name=dict(required=True),
network_driver=dict(choices=['flannel', 'calico', 'docker']),
no_proxy=dict(default=None),
public=dict(type='bool', default=False),
registry_enabled=dict(type='bool', default=False),
server_type=dict(default="vm", choices=['vm', 'bm']),
state=dict(default='present', choices=['absent', 'present']),
tls_disabled=dict(type='bool', default=False),
volume_driver=dict(choices=['cinder', 'rexray']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
params = module.params.copy()
state = module.params['state']
name = module.params['name']
coe = module.params['coe']
image_id = module.params['image_id']
kwargs = dict(
dns_nameserver=module.params['dns_nameserver'],
docker_storage_driver=module.params['docker_storage_driver'],
docker_volume_size=module.params['docker_volume_size'],
external_network_id=module.params['external_network_id'],
fixed_network=module.params['fixed_network'],
fixed_subnet=module.params['fixed_subnet'],
flavor_id=module.params['flavor_id'],
floating_ip_enabled=module.params['floating_ip_enabled'],
keypair_id=module.params['keypair_id'],
labels=_parse_labels(params['labels']),
http_proxy=module.params['http_proxy'],
https_proxy=module.params['https_proxy'],
master_lb_enabled=module.params['master_lb_enabled'],
master_flavor_id=module.params['master_flavor_id'],
network_driver=module.params['network_driver'],
no_proxy=module.params['no_proxy'],
public=module.params['public'],
registry_enabled=module.params['registry_enabled'],
server_type=module.params['server_type'],
tls_disabled=module.params['tls_disabled'],
volume_driver=module.params['volume_driver'],
)
sdk, cloud = openstack_cloud_from_module(module)
try:
changed = False
template = cloud.get_coe_cluster_template(name_or_id=name, filters={'coe': coe, 'image_id': image_id})
if state == 'present':
if not template:
template = cloud.create_coe_cluster_template(name, coe=coe, image_id=image_id, **kwargs)
changed = True
else:
changed = False
module.exit_json(changed=changed, cluster_template=template, id=template['uuid'])
elif state == 'absent':
if not template:
module.exit_json(changed=False)
else:
cloud.delete_coe_cluster_template(name)
module.exit_json(changed=True)
except sdk.exceptions.OpenStackCloudException as e:
module.fail_json(msg=str(e), extra_data=e.extra_data)
if __name__ == "__main__":
main()
|
MadManRises/Madgine
|
refs/heads/master
|
shared/bullet3-2.89/examples/pybullet/gym/pybullet_envs/deep_mimic/env/env.py
|
4
|
from abc import abstractmethod
import sys, abc
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta('ABC', (), {})
import numpy as np
from enum import Enum
class Env(ABC):
class Terminate(Enum):
Null = 0
Fail = 1
Succ = 2
def __init__(self, args, enable_draw):
self.enable_draw = enable_draw
return
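# A minimal concrete subclass sketch (hypothetical, not in the original file;
# real environments are expected to define the full state/action interface
# on top of this base class):
class _NullEnv(Env):
    def __init__(self, args=None, enable_draw=False):
        super(_NullEnv, self).__init__(args, enable_draw)
        self.terminate_flag = Env.Terminate.Null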
|
dulaccc/django-accounting
|
refs/heads/master
|
accounting/apps/people/admin.py
|
3
|
from django.contrib import admin
from . import models
@admin.register(models.Client)
class ClientAdmin(admin.ModelAdmin):
pass
@admin.register(models.Employee)
class EmployeeAdmin(admin.ModelAdmin):
pass
|
yland/coala-bears
|
refs/heads/master
|
bears/php/__init__.py
|
12133432
| |
lfasmpao/safecore-api
|
refs/heads/master
|
administrator/models.py
|
12133432
| |
cjerdonek/pip
|
refs/heads/develop
|
tests/data/packages/LocalExtras/localextras/__init__.py
|
12133432
| |
xuru/pyvisdk
|
refs/heads/master
|
pyvisdk/enums/dv_port_status_vm_direct_path_gen2_inactive_reason_other.py
|
1
|
########################################
# Automatically generated, do not edit.
########################################
from pyvisdk.thirdparty import Enum
DVPortStatusVmDirectPathGen2InactiveReasonOther = Enum(
'portNptIncompatibleConnectee',
'portNptIncompatibleHost',
)
|
WillieMaddox/scipy
|
refs/heads/master
|
scipy/io/wavfile.py
|
13
|
"""
Module to read / write wav files using numpy arrays
Functions
---------
`read`: Return the sample rate (in samples/sec) and data from a WAV file.
`write`: Write a numpy array as a WAV file.
"""
from __future__ import division, print_function, absolute_import
import sys
import numpy
import struct
import warnings
__all__ = [
'WavFileWarning',
'read',
'write'
]
class WavFileWarning(UserWarning):
pass
WAVE_FORMAT_PCM = 0x0001
WAVE_FORMAT_IEEE_FLOAT = 0x0003
WAVE_FORMAT_EXTENSIBLE = 0xfffe
KNOWN_WAVE_FORMATS = (WAVE_FORMAT_PCM, WAVE_FORMAT_IEEE_FLOAT)
# assumes file pointer is immediately
# after the 'fmt ' id
def _read_fmt_chunk(fid, is_big_endian):
if is_big_endian:
fmt = '>'
else:
fmt = '<'
size = res = struct.unpack(fmt+'I', fid.read(4))[0]
bytes_read = 0
if size < 16:
raise ValueError("Binary structure of wave file is not compliant")
res = struct.unpack(fmt+'HHIIHH', fid.read(16))
bytes_read += 16
comp, noc, rate, sbytes, ba, bits = res
if comp == WAVE_FORMAT_EXTENSIBLE and size >= (16+2):
ext_chunk_size = struct.unpack(fmt+'H', fid.read(2))[0]
bytes_read += 2
if ext_chunk_size >= 22:
extensible_chunk_data = fid.read(22)
bytes_read += 22
raw_guid = extensible_chunk_data[2+4:2+4+16]
# GUID template {XXXXXXXX-0000-0010-8000-00AA00389B71} (RFC-2361)
# MS GUID byte order: first three groups are native byte order, rest is Big Endian
if is_big_endian:
tail = b'\x00\x00\x00\x10\x80\x00\x00\xAA\x00\x38\x9B\x71'
else:
tail = b'\x00\x00\x10\x00\x80\x00\x00\xAA\x00\x38\x9B\x71'
if raw_guid.endswith(tail):
comp = struct.unpack(fmt+'I', raw_guid[:4])[0]
else:
raise ValueError("Binary structure of wave file is not compliant")
if comp not in KNOWN_WAVE_FORMATS:
raise ValueError("Unknown wave file format")
# move file pointer to next chunk
if size > (bytes_read):
fid.read(size - bytes_read)
return size, comp, noc, rate, sbytes, ba, bits
# assumes file pointer is immediately
# after the 'data' id
def _read_data_chunk(fid, comp, noc, bits, is_big_endian, mmap=False):
if is_big_endian:
fmt = '>I'
else:
fmt = '<I'
size = struct.unpack(fmt, fid.read(4))[0]
bytes = bits//8
if bits == 8:
dtype = 'u1'
else:
if is_big_endian:
dtype = '>'
else:
dtype = '<'
if comp == 1:
dtype += 'i%d' % bytes
else:
dtype += 'f%d' % bytes
if not mmap:
data = numpy.fromstring(fid.read(size), dtype=dtype)
else:
start = fid.tell()
data = numpy.memmap(fid, dtype=dtype, mode='c', offset=start,
shape=(size//bytes,))
fid.seek(start + size)
if noc > 1:
data = data.reshape(-1, noc)
return data
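# For instance (hypothetical values): 16-bit little-endian PCM yields dtype
# '<i2', while 32-bit IEEE float in a big-endian (RIFX) file yields '>f4'.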
def _skip_unknown_chunk(fid, is_big_endian):
if is_big_endian:
fmt = '>I'
else:
fmt = '<I'
data = fid.read(4)
# call unpack() and seek() only if we have really read data from file
# otherwise empty read at the end of the file would trigger
# unnecessary exception at unpack() call
# in case data equals somehow to 0, there is no need for seek() anyway
if data:
size = struct.unpack(fmt, data)[0]
fid.seek(size, 1)
def _read_riff_chunk(fid):
str1 = fid.read(4)
if str1 == b'RIFF':
is_big_endian = False
elif str1 == b'RIFX':
is_big_endian = True
else:
raise ValueError("Not a WAV file.")
if is_big_endian:
fmt = '>I'
else:
fmt = '<I'
fsize = struct.unpack(fmt, fid.read(4))[0] + 8
str2 = fid.read(4)
if (str2 != b'WAVE'):
raise ValueError("Not a WAV file.")
return fsize, is_big_endian
# open a wave-file
def read(filename, mmap=False):
"""
Return the sample rate (in samples/sec) and data from a WAV file
Parameters
----------
filename : string or open file handle
Input wav file.
mmap : bool, optional
Whether to read data as memory mapped.
Only to be used on real files (Default: False)
.. versionadded:: 0.12.0
Returns
-------
rate : int
Sample rate of wav file
data : numpy array
Data read from wav file
Notes
-----
* The file can be an open file or a filename.
* The returned sample rate is a Python integer.
* The data is returned as a numpy array with a data-type determined
from the file.
* This function cannot read wav files with 24 bit data.
"""
if hasattr(filename, 'read'):
fid = filename
mmap = False
else:
fid = open(filename, 'rb')
try:
fsize, is_big_endian = _read_riff_chunk(fid)
fmt_chunk_received = False
noc = 1
bits = 8
comp = WAVE_FORMAT_PCM
while (fid.tell() < fsize):
# read the next chunk
chunk_id = fid.read(4)
if chunk_id == b'fmt ':
fmt_chunk_received = True
size, comp, noc, rate, sbytes, ba, bits = _read_fmt_chunk(fid, is_big_endian=is_big_endian)
if bits == 24:
raise ValueError("Unsupported bit depth: the wav file "
"has 24 bit data.")
elif chunk_id == b'fact':
_skip_unknown_chunk(fid, is_big_endian=is_big_endian)
elif chunk_id == b'data':
if not fmt_chunk_received:
raise ValueError("No fmt chunk before data")
data = _read_data_chunk(fid, comp, noc, bits, is_big_endian=is_big_endian, mmap=mmap)
elif chunk_id == b'LIST':
# Someday this could be handled properly but for now skip it
_skip_unknown_chunk(fid, is_big_endian=is_big_endian)
elif chunk_id in (b'JUNK', b'Fake'):
# Skip alignment chunks without warning
_skip_unknown_chunk(fid, is_big_endian=is_big_endian)
else:
warnings.warn("Chunk (non-data) not understood, skipping it.",
WavFileWarning)
_skip_unknown_chunk(fid, is_big_endian=is_big_endian)
finally:
if not hasattr(filename, 'read'):
fid.close()
else:
fid.seek(0)
return rate, data
# Write a wave-file
# sample rate, data
def write(filename, rate, data):
"""
Write a numpy array as a WAV file
Parameters
----------
filename : string or open file handle
Output wav file
rate : int
The sample rate (in samples/sec).
data : ndarray
A 1-D or 2-D numpy array of either integer or float data-type.
Notes
-----
* The file can be an open file or a filename.
* Writes a simple uncompressed WAV file.
* The bits-per-sample will be determined by the data-type.
* To write multiple-channels, use a 2-D array of shape
(Nsamples, Nchannels).
"""
if hasattr(filename, 'write'):
fid = filename
else:
fid = open(filename, 'wb')
try:
dkind = data.dtype.kind
if not (dkind == 'i' or dkind == 'f' or (dkind == 'u' and
data.dtype.itemsize == 1)):
raise ValueError("Unsupported data type '%s'" % data.dtype)
header_data = b''
header_data += b'RIFF'
header_data += b'\x00\x00\x00\x00'
header_data += b'WAVE'
# fmt chunk
header_data += b'fmt '
if dkind == 'f':
comp = 3
else:
comp = 1
if data.ndim == 1:
noc = 1
else:
noc = data.shape[1]
bits = data.dtype.itemsize * 8
sbytes = rate*(bits // 8)*noc
ba = noc * (bits // 8)
fmt_chunk_data = struct.pack('<HHIIHH', comp, noc, rate, sbytes,
ba, bits)
if not (dkind == 'i' or dkind == 'u'):
# add cbSize field for non-PCM files
fmt_chunk_data += b'\x00\x00'
header_data += struct.pack('<I', len(fmt_chunk_data))
header_data += fmt_chunk_data
# fact chunk (non-PCM files)
if not (dkind == 'i' or dkind == 'u'):
header_data += b'fact'
header_data += struct.pack('<II', 4, data.shape[0])
# check data size (needs to be immediately before the data chunk)
if ((len(header_data)-4-4) + (4+4+data.nbytes)) > 0xFFFFFFFF:
raise ValueError("Data exceeds wave file size limit")
fid.write(header_data)
# data chunk
fid.write(b'data')
fid.write(struct.pack('<I', data.nbytes))
if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and
sys.byteorder == 'big'):
data = data.byteswap()
_array_tofile(fid, data)
# Determine file size and place it in correct
# position at start of the file.
size = fid.tell()
fid.seek(4)
fid.write(struct.pack('<I', size-8))
finally:
if not hasattr(filename, 'write'):
fid.close()
else:
fid.seek(0)
if sys.version_info[0] >= 3:
def _array_tofile(fid, data):
# ravel gives a c-contiguous buffer
fid.write(data.ravel().view('b').data)
else:
def _array_tofile(fid, data):
fid.write(data.tostring())
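if __name__ == "__main__":
    # Hedged smoke test, not part of the original module: round-trip a
    # one-second 440 Hz float32 sine wave through write() and read().
    # Note this writes 'sine.wav' into the current working directory.
    rate = 44100
    t = numpy.arange(rate) / float(rate)
    samples = (0.5 * numpy.sin(2 * numpy.pi * 440.0 * t)).astype(numpy.float32)
    write('sine.wav', rate, samples)
    rate_back, data_back = read('sine.wav')
    assert rate_back == rate
    assert data_back.shape == samples.shape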
|
ehashman/oh-mainline
|
refs/heads/master
|
vendor/packages/zope.interface/build_ext_3.py
|
16
|
import os
import sys
from distutils.errors import (CCompilerError, DistutilsExecError,
DistutilsPlatformError)
try:
from setuptools.command.build_ext import build_ext
from pkg_resources import (normalize_path, working_set,
add_activation_listener, require)
except ImportError:
raise RuntimeError("zope.interface requires Distribute under Python 3. "
"See http://packages.python.org/distribute")
class optional_build_ext(build_ext):
"""This class subclasses build_ext and allows
the building of C extensions to fail.
"""
def run(self):
try:
build_ext.run(self)
except DistutilsPlatformError as e:
self._unavailable(e)
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except (CCompilerError, DistutilsExecError) as e:
self._unavailable(e)
def _unavailable(self, e):
print('*' * 80, file=sys.stderr)
print("""WARNING:
An optional code optimization (C extension) could not be compiled.
Optimizations for this package will not be available!""", file=sys.stderr)
print(file=sys.stderr)
print(e, file=sys.stderr)
print('*' * 80, file=sys.stderr)
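# Hypothetical wiring sketch (not part of this module): a setup.py would opt
# in via cmdclass, so a failed C compile degrades to the pure-Python fallback:
#
#     setup(...,
#           cmdclass={'build_ext': optional_build_ext},
#           ...)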
|
ryokochang/Slab-GCS
|
refs/heads/master
|
packages/IronPython.StdLib.2.7.5-beta1/content/Lib/distutils/command/sdist.py
|
41
|
"""distutils.command.sdist
Implements the Distutils 'sdist' command (create a source distribution)."""
__revision__ = "$Id$"
import os
import string
import sys
from glob import glob
from warnings import warn
from distutils.core import Command
from distutils import dir_util, dep_util, file_util, archive_util
from distutils.text_file import TextFile
from distutils.errors import (DistutilsPlatformError, DistutilsOptionError,
DistutilsTemplateError)
from distutils.filelist import FileList
from distutils import log
from distutils.util import convert_path
def show_formats():
"""Print all possible values for the 'formats' option (used by
the "--help-formats" command-line option).
"""
from distutils.fancy_getopt import FancyGetopt
from distutils.archive_util import ARCHIVE_FORMATS
formats = []
for format in ARCHIVE_FORMATS.keys():
formats.append(("formats=" + format, None,
ARCHIVE_FORMATS[format][2]))
formats.sort()
FancyGetopt(formats).print_help(
"List of available source distribution formats:")
class sdist(Command):
description = "create a source distribution (tarball, zip file, etc.)"
def checking_metadata(self):
"""Callable used for the check sub-command.
Placed here so user_options can view it"""
return self.metadata_check
user_options = [
('template=', 't',
"name of manifest template file [default: MANIFEST.in]"),
('manifest=', 'm',
"name of manifest file [default: MANIFEST]"),
('use-defaults', None,
"include the default file set in the manifest "
"[default; disable with --no-defaults]"),
('no-defaults', None,
"don't include the default file set"),
('prune', None,
"specifically exclude files/directories that should not be "
"distributed (build tree, RCS/CVS dirs, etc.) "
"[default; disable with --no-prune]"),
('no-prune', None,
"don't automatically exclude anything"),
('manifest-only', 'o',
"just regenerate the manifest and then stop "
"(implies --force-manifest)"),
('force-manifest', 'f',
"forcibly regenerate the manifest and carry on as usual. "
"Deprecated: now the manifest is always regenerated."),
('formats=', None,
"formats for source distribution (comma-separated list)"),
('keep-temp', 'k',
"keep the distribution tree around after creating " +
"archive file(s)"),
('dist-dir=', 'd',
"directory to put the source distribution archive(s) in "
"[default: dist]"),
('metadata-check', None,
"Ensure that all required elements of meta-data "
"are supplied. Warn if any missing. [default]"),
('owner=', 'u',
"Owner name used when creating a tar file [default: current user]"),
('group=', 'g',
"Group name used when creating a tar file [default: current group]"),
]
boolean_options = ['use-defaults', 'prune',
'manifest-only', 'force-manifest',
'keep-temp', 'metadata-check']
help_options = [
('help-formats', None,
"list available distribution formats", show_formats),
]
negative_opt = {'no-defaults': 'use-defaults',
'no-prune': 'prune' }
default_format = {'posix': 'gztar',
'nt': 'zip' }
sub_commands = [('check', checking_metadata)]
def initialize_options(self):
# 'template' and 'manifest' are, respectively, the names of
# the manifest template and manifest file.
self.template = None
self.manifest = None
# 'use_defaults': if true, we will include the default file set
# in the manifest
self.use_defaults = 1
self.prune = 1
self.manifest_only = 0
self.force_manifest = 0
self.formats = None
self.keep_temp = 0
self.dist_dir = None
self.archive_files = None
self.metadata_check = 1
self.owner = None
self.group = None
def finalize_options(self):
if self.manifest is None:
self.manifest = "MANIFEST"
if self.template is None:
self.template = "MANIFEST.in"
self.ensure_string_list('formats')
if self.formats is None:
try:
self.formats = [self.default_format[os.name]]
except KeyError:
raise DistutilsPlatformError, \
"don't know how to create source distributions " + \
"on platform %s" % os.name
bad_format = archive_util.check_archive_formats(self.formats)
if bad_format:
raise DistutilsOptionError, \
"unknown archive format '%s'" % bad_format
if self.dist_dir is None:
self.dist_dir = "dist"
def run(self):
# 'filelist' contains the list of files that will make up the
# manifest
self.filelist = FileList()
# Run sub commands
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
# Do whatever it takes to get the list of files to process
# (process the manifest template, read an existing manifest,
# whatever). File list is accumulated in 'self.filelist'.
self.get_file_list()
# If user just wanted us to regenerate the manifest, stop now.
if self.manifest_only:
return
# Otherwise, go ahead and create the source distribution tarball,
# or zipfile, or whatever.
self.make_distribution()
def check_metadata(self):
"""Deprecated API."""
warn("distutils.command.sdist.check_metadata is deprecated, \
use the check command instead", PendingDeprecationWarning)
check = self.distribution.get_command_obj('check')
check.ensure_finalized()
check.run()
def get_file_list(self):
"""Figure out the list of files to include in the source
distribution, and put it in 'self.filelist'. This might involve
reading the manifest template (and writing the manifest), or just
reading the manifest, or just using the default file set -- it all
depends on the user's options.
"""
# new behavior:
# the file list is recalculated every time because
# even if MANIFEST.in or setup.py are not changed
# the user might have added some files in the tree that
# need to be included.
#
# This makes --force the default and only behavior.
template_exists = os.path.isfile(self.template)
if not template_exists:
self.warn(("manifest template '%s' does not exist " +
"(using default file list)") %
self.template)
self.filelist.findall()
if self.use_defaults:
self.add_defaults()
if template_exists:
self.read_template()
if self.prune:
self.prune_file_list()
self.filelist.sort()
self.filelist.remove_duplicates()
self.write_manifest()
def add_defaults(self):
"""Add all the default files to self.filelist:
- README or README.txt
- setup.py
- test/test*.py
- all pure Python modules mentioned in setup script
- all files pointed by package_data (build_py)
- all files defined in data_files.
- all files defined as scripts.
- all C sources listed as part of extensions or C libraries
in the setup script (doesn't catch C headers!)
Warns if (README or README.txt) or setup.py are missing; everything
else is optional.
"""
standards = [('README', 'README.txt'), self.distribution.script_name]
for fn in standards:
if isinstance(fn, tuple):
alts = fn
got_it = 0
for fn in alts:
if os.path.exists(fn):
got_it = 1
self.filelist.append(fn)
break
if not got_it:
self.warn("standard file not found: should have one of " +
string.join(alts, ', '))
else:
if os.path.exists(fn):
self.filelist.append(fn)
else:
self.warn("standard file '%s' not found" % fn)
optional = ['test/test*.py', 'setup.cfg']
for pattern in optional:
files = filter(os.path.isfile, glob(pattern))
if files:
self.filelist.extend(files)
# build_py is used to get:
# - python modules
# - files defined in package_data
build_py = self.get_finalized_command('build_py')
# getting python files
if self.distribution.has_pure_modules():
self.filelist.extend(build_py.get_source_files())
# getting package_data files
# (computed in build_py.data_files by build_py.finalize_options)
for pkg, src_dir, build_dir, filenames in build_py.data_files:
for filename in filenames:
self.filelist.append(os.path.join(src_dir, filename))
# getting distribution.data_files
if self.distribution.has_data_files():
for item in self.distribution.data_files:
if isinstance(item, str): # plain file
item = convert_path(item)
if os.path.isfile(item):
self.filelist.append(item)
else: # a (dirname, filenames) tuple
dirname, filenames = item
for f in filenames:
f = convert_path(f)
if os.path.isfile(f):
self.filelist.append(f)
if self.distribution.has_ext_modules():
build_ext = self.get_finalized_command('build_ext')
self.filelist.extend(build_ext.get_source_files())
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command('build_clib')
self.filelist.extend(build_clib.get_source_files())
if self.distribution.has_scripts():
build_scripts = self.get_finalized_command('build_scripts')
self.filelist.extend(build_scripts.get_source_files())
def read_template(self):
"""Read and parse manifest template file named by self.template.
(usually "MANIFEST.in") The parsing and processing is done by
'self.filelist', which updates itself accordingly.
"""
log.info("reading manifest template '%s'", self.template)
template = TextFile(self.template,
strip_comments=1,
skip_blanks=1,
join_lines=1,
lstrip_ws=1,
rstrip_ws=1,
collapse_join=1)
while 1:
line = template.readline()
if line is None: # end of file
break
try:
self.filelist.process_template_line(line)
except DistutilsTemplateError, msg:
self.warn("%s, line %d: %s" % (template.filename,
template.current_line,
msg))
def prune_file_list(self):
"""Prune off branches that might slip into the file list as created
by 'read_template()', but really don't belong there:
* the build tree (typically "build")
* the release tree itself (only an issue if we ran "sdist"
previously with --keep-temp, or it aborted)
* any RCS, CVS, .svn, .hg, .git, .bzr, _darcs directories
"""
build = self.get_finalized_command('build')
base_dir = self.distribution.get_fullname()
self.filelist.exclude_pattern(None, prefix=build.build_base)
self.filelist.exclude_pattern(None, prefix=base_dir)
# pruning out vcs directories
# both separators are used under win32
if sys.platform == 'win32':
seps = r'/|\\'
else:
seps = '/'
vcs_dirs = ['RCS', 'CVS', r'\.svn', r'\.hg', r'\.git', r'\.bzr',
'_darcs']
vcs_ptrn = r'(^|%s)(%s)(%s).*' % (seps, '|'.join(vcs_dirs), seps)
self.filelist.exclude_pattern(vcs_ptrn, is_regex=1)
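# On POSIX the resulting pattern is, for example (illustrative only):
# r'(^|/)(RCS|CVS|\.svn|\.hg|\.git|\.bzr|_darcs)(/).*'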
def write_manifest(self):
"""Write the file list in 'self.filelist' (presumably as filled in
by 'add_defaults()' and 'read_template()') to the manifest file
named by 'self.manifest'.
"""
if os.path.isfile(self.manifest):
fp = open(self.manifest)
try:
first_line = fp.readline()
finally:
fp.close()
if first_line != '# file GENERATED by distutils, do NOT edit\n':
log.info("not writing to manually maintained "
"manifest file '%s'" % self.manifest)
return
content = self.filelist.files[:]
content.insert(0, '# file GENERATED by distutils, do NOT edit')
self.execute(file_util.write_file, (self.manifest, content),
"writing manifest file '%s'" % self.manifest)
def read_manifest(self):
"""Read the manifest file (named by 'self.manifest') and use it to
fill in 'self.filelist', the list of files to include in the source
distribution.
"""
log.info("reading manifest file '%s'", self.manifest)
manifest = open(self.manifest)
while 1:
line = manifest.readline()
if line == '': # end of file
break
if line[-1] == '\n':
line = line[0:-1]
self.filelist.append(line)
manifest.close()
def make_release_tree(self, base_dir, files):
"""Create the directory tree that will become the source
distribution archive. All directories implied by the filenames in
'files' are created under 'base_dir', and then we hard link or copy
(if hard linking is unavailable) those files into place.
Essentially, this duplicates the developer's source tree, but in a
directory named after the distribution, containing only the files
to be distributed.
"""
# Create all the directories under 'base_dir' necessary to
# put 'files' there; the 'mkpath()' is just so we don't die
# if the manifest happens to be empty.
self.mkpath(base_dir)
dir_util.create_tree(base_dir, files, dry_run=self.dry_run)
# And walk over the list of files, either making a hard link (if
# os.link exists) to each one that doesn't already exist in its
# corresponding location under 'base_dir', or copying each file
# that's out-of-date in 'base_dir'. (Usually, all files will be
# out-of-date, because by default we blow away 'base_dir' when
# we're done making the distribution archives.)
if hasattr(os, 'link'): # can make hard links on this system
link = 'hard'
msg = "making hard links in %s..." % base_dir
else: # nope, have to copy
link = None
msg = "copying files to %s..." % base_dir
if not files:
log.warn("no files to distribute -- empty manifest?")
else:
log.info(msg)
for file in files:
if not os.path.isfile(file):
log.warn("'%s' not a regular file -- skipping" % file)
else:
dest = os.path.join(base_dir, file)
self.copy_file(file, dest, link=link)
self.distribution.metadata.write_pkg_info(base_dir)
def make_distribution(self):
"""Create the source distribution(s). First, we create the release
tree with 'make_release_tree()'; then, we create all required
archive files (according to 'self.formats') from the release tree.
Finally, we clean up by blowing away the release tree (unless
'self.keep_temp' is true). The list of archive files created is
stored so it can be retrieved later by 'get_archive_files()'.
"""
# Don't warn about missing meta-data here -- should be (and is!)
# done elsewhere.
base_dir = self.distribution.get_fullname()
base_name = os.path.join(self.dist_dir, base_dir)
self.make_release_tree(base_dir, self.filelist.files)
archive_files = [] # remember names of files we create
# tar archive must be created last to avoid overwrite and remove
if 'tar' in self.formats:
self.formats.append(self.formats.pop(self.formats.index('tar')))
for fmt in self.formats:
file = self.make_archive(base_name, fmt, base_dir=base_dir,
owner=self.owner, group=self.group)
archive_files.append(file)
self.distribution.dist_files.append(('sdist', '', file))
self.archive_files = archive_files
if not self.keep_temp:
dir_util.remove_tree(base_dir, dry_run=self.dry_run)
def get_archive_files(self):
"""Return the list of archive files created when the command
was run, or None if the command hasn't run yet.
"""
return self.archive_files
|
saurabhbajaj207/CarpeDiem
|
refs/heads/master
|
venv/Lib/site-packages/pkg_resources/_vendor/six.py
|
2715
|
"""Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.10.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP 302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python 3.
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP 451).
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
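# Illustrative note: once _importer is appended to sys.meta_path (done at the
# bottom of this module), an import such as
#
#     from six.moves import http_client
#
# is resolved by _SixMetaPathImporter.find_module/load_module rather than by
# the normal filesystem search.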
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
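# Illustrative sketch, assuming a metaclass named Meta:
#
#     class Meta(type):
#         pass
#
#     class Base(with_metaclass(Meta, object)):
#         pass
#
# type(Base) is then Meta on both Python 2 and Python 3.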
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
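# Illustrative sketch, the decorator form of the same idea:
#
#     @add_metaclass(Meta)
#     class Base(object):
#         pass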
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
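# Illustrative sketch:
#
#     @python_2_unicode_compatible
#     class Greeting(object):
#         def __str__(self):
#             return u'hello'
#
# On Python 2 this installs __unicode__ and a UTF-8-encoding __str__; on
# Python 3 the class is returned unchanged.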
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
|
Richard2ndQuadrant/ansible
|
refs/heads/devel
|
contrib/inventory/linode.py
|
109
|
#!/usr/bin/env python
'''
Linode external inventory script
=================================
Generates inventory that Ansible can understand by making an API request to
Linode using the Chube library.
NOTE: This script assumes Ansible is being executed where Chube is already
installed and has a valid config at ~/.chube. If not, run:
pip install chube
echo -e "---\napi_key: <YOUR API KEY GOES HERE>" > ~/.chube
For more details, see: https://github.com/exosite/chube
NOTE: This script also assumes that the Linodes in your account all have
labels that correspond to hostnames that are in your resolver search path.
Your resolver search path is configured in /etc/resolv.conf.
When run against a specific host, this script returns the following variables:
- api_id
- datacenter_id
- datacenter_city (lowercase city name of data center, e.g. 'tokyo')
- label
- display_group
- create_dt
- total_hd
- total_xfer
- total_ram
- status
- public_ip (The first public IP found)
- private_ip (The first private IP found, or empty string if none)
- alert_cpu_enabled
- alert_cpu_threshold
- alert_diskio_enabled
- alert_diskio_threshold
- alert_bwin_enabled
- alert_bwin_threshold
- alert_bwout_enabled
- alert_bwout_threshold
- alert_bwquota_enabled
- alert_bwquota_threshold
- backup_weekly_daily
- backup_window
- watchdog
Peter Sankauskas did most of the legwork here with his EC2 inventory plugin; I
just adapted that for Linode.
'''
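# Illustrative usage (a sketch; "web01" stands in for one of your Linode
# labels):
#
#     ./linode.py --list                  # full inventory, grouped
#     ./linode.py --host web01            # variables for a single node
#     ./linode.py --refresh-cache --list  # bypass the local cache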
# (c) 2013, Dan Slimmon
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
# Standard imports
import os
import re
import sys
import argparse
from time import time
try:
import json
except ImportError:
import simplejson as json
try:
from chube import load_chube_config
from chube import api as chube_api
from chube.datacenter import Datacenter
from chube.linode_obj import Linode
except Exception:
try:
# remove local paths and other stuff that may
# cause an import conflict, as chube is sensitive
# to name collisions on importing
old_path = sys.path
sys.path = [d for d in sys.path if d not in ('', os.getcwd(), os.path.dirname(os.path.realpath(__file__)))]
from chube import load_chube_config
from chube import api as chube_api
from chube.datacenter import Datacenter
from chube.linode_obj import Linode
sys.path = old_path
except Exception as e:
raise Exception("could not import chube: %s" % e)
load_chube_config()
# Imports for ansible
import ConfigParser
class LinodeInventory(object):
def __init__(self):
"""Main execution path."""
# Inventory grouped by display group
self.inventory = {}
# Index of label to Linode ID
self.index = {}
# Local cache of Datacenter objects populated by populate_datacenter_cache()
self._datacenter_cache = None
# Read settings and parse CLI arguments
self.read_settings()
self.parse_cli_args()
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()
# Data to print
if self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of nodes for inventory
if len(self.inventory) == 0:
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print(data_to_print)
def is_cache_valid(self):
"""Determines if the cache file has expired, or if it is still valid."""
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
"""Reads the settings from the .ini file."""
config = ConfigParser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/linode.ini')
# Cache related
cache_path = config.get('linode', 'cache_path')
self.cache_path_cache = cache_path + "/ansible-linode.cache"
self.cache_path_index = cache_path + "/ansible-linode.index"
self.cache_max_age = config.getint('linode', 'cache_max_age')
def parse_cli_args(self):
"""Command line argument processing"""
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Linode')
parser.add_argument('--list', action='store_true', default=True,
help='List nodes (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific node')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to Linode (default: False - use cache files)')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
"""Do API calls, and save data in cache files."""
self.get_nodes()
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def get_nodes(self):
"""Makes an Linode API call to get the list of nodes."""
try:
for node in Linode.search(status=Linode.STATUS_RUNNING):
self.add_node(node)
except chube_api.linode_api.ApiError as e:
print("Looks like Linode's API is down:")
print("")
print(e)
sys.exit(1)
def get_node(self, linode_id):
"""Gets details about a specific node."""
try:
return Linode.find(api_id=linode_id)
except chube_api.linode_api.ApiError as e:
print("Looks like Linode's API is down:")
print("")
print(e)
sys.exit(1)
def populate_datacenter_cache(self):
"""Creates self._datacenter_cache, containing all Datacenters indexed by ID."""
self._datacenter_cache = {}
dcs = Datacenter.search()
for dc in dcs:
self._datacenter_cache[dc.api_id] = dc
def get_datacenter_city(self, node):
"""Returns a the lowercase city name of the node's data center."""
if self._datacenter_cache is None:
self.populate_datacenter_cache()
location = self._datacenter_cache[node.datacenter_id].location
location = location.lower()
location = location.split(",")[0]
return location
def add_node(self, node):
"""Adds an node to the inventory and index."""
dest = node.label
# Add to index
self.index[dest] = node.api_id
# Inventory: Group by node ID (always a group of 1)
self.inventory[node.api_id] = [dest]
# Inventory: Group by datacenter city
self.push(self.inventory, self.get_datacenter_city(node), dest)
# Inventory: Group by display group
self.push(self.inventory, node.display_group, dest)
def get_host_info(self):
"""Get variables about a specific host."""
if len(self.index) == 0:
# Need to load index from cache
self.load_index_from_cache()
if self.args.host not in self.index:
# try updating the cache
self.do_api_calls_update_cache()
if self.args.host not in self.index:
# host might not exist anymore
return self.json_format_dict({}, True)
node_id = self.index[self.args.host]
node = self.get_node(node_id)
node_vars = {}
for direct_attr in [
"api_id",
"datacenter_id",
"label",
"display_group",
"create_dt",
"total_hd",
"total_xfer",
"total_ram",
"status",
"alert_cpu_enabled",
"alert_cpu_threshold",
"alert_diskio_enabled",
"alert_diskio_threshold",
"alert_bwin_enabled",
"alert_bwin_threshold",
"alert_bwout_enabled",
"alert_bwout_threshold",
"alert_bwquota_enabled",
"alert_bwquota_threshold",
"backup_weekly_daily",
"backup_window",
"watchdog"
]:
node_vars[direct_attr] = getattr(node, direct_attr)
node_vars["datacenter_city"] = self.get_datacenter_city(node)
node_vars["public_ip"] = [addr.address for addr in node.ipaddresses if addr.is_public][0]
private_ips = [addr.address for addr in node.ipaddresses if not addr.is_public]
if private_ips:
node_vars["private_ip"] = private_ips[0]
return self.json_format_dict(node_vars, True)
def push(self, my_dict, key, element):
"""Pushes an element onto a list that may not yet exist in the dict."""
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element]
def get_inventory_from_cache(self):
"""Reads the inventory from the cache file and returns it as a JSON string."""
with open(self.cache_path_cache, 'r') as cache:
return cache.read()
def load_index_from_cache(self):
"""Reads the index from the cache file and sets self.index."""
with open(self.cache_path_index, 'r') as cache:
self.index = json.loads(cache.read())
def write_to_cache(self, data, filename):
"""Writes data in JSON format to a file."""
json_data = self.json_format_dict(data, True)
with open(filename, 'w') as cache:
cache.write(json_data)
def to_safe(self, word):
"""Escapes any characters that would be invalid in an Ansible group name."""
return re.sub(r"[^A-Za-z0-9\-]", "_", word)
def json_format_dict(self, data, pretty=False):
"""Converts a dict to a JSON object and dumps it as a formatted string."""
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
LinodeInventory()
|
dotKom/onlineweb4
|
refs/heads/develop
|
apps/profiles/migrations/0004_auto_20180221_2241.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-02-21 21:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0003_privacy_visible_as_attending_events'),
]
operations = [
migrations.AlterField(
model_name='privacy',
name='visible_as_attending_events',
field=models.BooleanField(default=False, verbose_name='vis på sosiale arrangement'),
),
]
|
pallavishende/angular-starter-master
|
refs/heads/master
|
node_modules/node-gyp/gyp/pylib/gyp/generator/cmake.py
|
1355
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""cmake output module
This module is under development and should be considered experimental.
This module produces cmake (2.8.8+) input as its output. One CMakeLists.txt is
created for each configuration.
This module's original purpose was to support editing in IDEs like KDevelop
which use CMake for project management. It is also possible to use CMake to
generate projects for other IDEs such as Eclipse CDT and Code::Blocks.
QtCreator will convert the CMakeLists.txt to a Code::Blocks .cbp file for the
editor to read, but build using CMake; as a result, the QtCreator editor is
unaware of compiler defines. The generated CMakeLists.txt can also be used to
build on Linux. There is currently no support for building on platforms other
than Linux.
The generated CMakeLists.txt should properly compile all projects. However,
there is a mismatch between gyp and cmake with regard to linking. All attempts
are made to work around this, but CMake sometimes sees -Wl,--start-group as a
library and incorrectly repeats it. As a result the output of this generator
should not be relied on for building.
When using this generator with KDevelop, use version 4.4+. Previous versions
of KDevelop will not be able to find the header file directories described in
the generated CMakeLists.txt file.
"""
import multiprocessing
import os
import signal
import string
import subprocess
import gyp.common
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
'SHARED_LIB_SUFFIX': '.so',
'SHARED_LIB_DIR': '${builddir}/lib.${TOOLSET}',
'LIB_DIR': '${obj}.${TOOLSET}',
'INTERMEDIATE_DIR': '${obj}.${TOOLSET}/${TARGET}/geni',
'SHARED_INTERMEDIATE_DIR': '${obj}/gen',
'PRODUCT_DIR': '${builddir}',
'RULE_INPUT_PATH': '${RULE_INPUT_PATH}',
'RULE_INPUT_DIRNAME': '${RULE_INPUT_DIRNAME}',
'RULE_INPUT_NAME': '${RULE_INPUT_NAME}',
'RULE_INPUT_ROOT': '${RULE_INPUT_ROOT}',
'RULE_INPUT_EXT': '${RULE_INPUT_EXT}',
'CONFIGURATION_NAME': '${configuration}',
}
FULL_PATH_VARS = ('${CMAKE_CURRENT_LIST_DIR}', '${builddir}', '${obj}')
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = True
COMPILABLE_EXTENSIONS = {
'.c': 'cc',
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.s': 's', # cc
'.S': 's', # cc
}
def RemovePrefix(a, prefix):
"""Returns 'a' without 'prefix' if it starts with 'prefix'."""
return a[len(prefix):] if a.startswith(prefix) else a
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
def Compilable(filename):
"""Return true if the file is compilable (should be in OBJS)."""
return any(filename.endswith(e) for e in COMPILABLE_EXTENSIONS)
def Linkable(filename):
"""Return true if the file is linkable (should be on the link line)."""
return filename.endswith('.o')
def NormjoinPathForceCMakeSource(base_path, rel_path):
"""Resolves rel_path against base_path and returns the result.
If rel_path is an absolute path it is returned unchanged.
Otherwise it is resolved against base_path and normalized.
If the result is a relative path, it is forced to be relative to the
CMakeLists.txt.
"""
if os.path.isabs(rel_path):
return rel_path
if any([rel_path.startswith(var) for var in FULL_PATH_VARS]):
return rel_path
# TODO: do we need to check base_path for absolute variables as well?
return os.path.join('${CMAKE_CURRENT_LIST_DIR}',
os.path.normpath(os.path.join(base_path, rel_path)))
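# Illustrative sketch (hypothetical paths):
#
#     NormjoinPathForceCMakeSource('chrome', '../base/base.gyp')
#     # -> '${CMAKE_CURRENT_LIST_DIR}/base/base.gyp'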
def NormjoinPath(base_path, rel_path):
"""Resolves rel_path against base_path and returns the result.
TODO: what is this really used for?
If rel_path begins with '$' it is returned unchanged.
Otherwise it is resolved against base_path if relative, then normalized.
"""
if rel_path.startswith('$') and not rel_path.startswith('${configuration}'):
return rel_path
return os.path.normpath(os.path.join(base_path, rel_path))
def CMakeStringEscape(a):
"""Escapes the string 'a' for use inside a CMake string.
This means escaping
'\' otherwise it may be seen as modifying the next character
'"' otherwise it will end the string
';' otherwise the string becomes a list
The following do not need to be escaped
'#' when the lexer is in string state, this does not start a comment
The following are yet unknown
'$' generator variables (like ${obj}) must not be escaped,
but text $ should be escaped
what is wanted is to know which $ come from generator variables
"""
return a.replace('\\', '\\\\').replace(';', '\\;').replace('"', '\\"')
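# Illustrative sketch: for the Python string 'a;b "c" d\\e' (that is,
# a;b "c" d\e), CMakeStringEscape returns a\;b \"c\" d\\e, which is safe to
# embed inside a double-quoted CMake string.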
def SetFileProperty(output, source_name, property_name, values, sep):
"""Given a set of source file, sets the given property on them."""
output.write('set_source_files_properties(')
output.write(source_name)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetFilesProperty(output, variable, property_name, values, sep):
"""Given a set of source files, sets the given property on them."""
output.write('set_source_files_properties(')
WriteVariable(output, variable)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetTargetProperty(output, target_name, property_name, values, sep=''):
"""Given a target, sets the given property."""
output.write('set_target_properties(')
output.write(target_name)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetVariable(output, variable_name, value):
"""Sets a CMake variable."""
output.write('set(')
output.write(variable_name)
output.write(' "')
output.write(CMakeStringEscape(value))
output.write('")\n')
def SetVariableList(output, variable_name, values):
"""Sets a CMake variable to a list."""
if not values:
return SetVariable(output, variable_name, "")
if len(values) == 1:
return SetVariable(output, variable_name, values[0])
output.write('list(APPEND ')
output.write(variable_name)
output.write('\n "')
output.write('"\n "'.join([CMakeStringEscape(value) for value in values]))
output.write('")\n')
def UnsetVariable(output, variable_name):
"""Unsets a CMake variable."""
output.write('unset(')
output.write(variable_name)
output.write(')\n')
def WriteVariable(output, variable_name, prepend=None):
if prepend:
output.write(prepend)
output.write('${')
output.write(variable_name)
output.write('}')
class CMakeTargetType(object):
def __init__(self, command, modifier, property_modifier):
self.command = command
self.modifier = modifier
self.property_modifier = property_modifier
cmake_target_type_from_gyp_target_type = {
'executable': CMakeTargetType('add_executable', None, 'RUNTIME'),
'static_library': CMakeTargetType('add_library', 'STATIC', 'ARCHIVE'),
'shared_library': CMakeTargetType('add_library', 'SHARED', 'LIBRARY'),
'loadable_module': CMakeTargetType('add_library', 'MODULE', 'LIBRARY'),
'none': CMakeTargetType('add_custom_target', 'SOURCES', None),
}
def StringToCMakeTargetName(a):
"""Converts the given string 'a' to a valid CMake target name.
All invalid characters are replaced by '_'.
Invalid for cmake: ' ', '/', '(', ')', '"'
Invalid for make: ':'
Invalid for unknown reasons but cause failures: '.'
"""
return a.translate(string.maketrans(' /():."', '_______'))
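# Illustrative sketch (Python 2 string.maketrans semantics, as used above):
#
#     StringToCMakeTargetName('lib/foo (test).gyp')  # -> 'lib_foo__test__gyp'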
def WriteActions(target_name, actions, extra_sources, extra_deps,
path_to_gyp, output):
"""Write CMake for the 'actions' in the target.
Args:
target_name: the name of the CMake target being generated.
actions: the Gyp 'actions' dict for this target.
extra_sources: [(<cmake_src>, <src>)] to append with generated source files.
extra_deps: [<cmake_target>] to append with generated targets.
path_to_gyp: relative path from the CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
output: the CMakeLists.txt file object to write to.
"""
for action in actions:
action_name = StringToCMakeTargetName(action['action_name'])
action_target_name = '%s__%s' % (target_name, action_name)
inputs = action['inputs']
inputs_name = action_target_name + '__input'
SetVariableList(output, inputs_name,
[NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs])
outputs = action['outputs']
cmake_outputs = [NormjoinPathForceCMakeSource(path_to_gyp, out)
for out in outputs]
outputs_name = action_target_name + '__output'
SetVariableList(output, outputs_name, cmake_outputs)
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources.extend(zip(cmake_outputs, outputs))
# add_custom_command
output.write('add_custom_command(OUTPUT ')
WriteVariable(output, outputs_name)
output.write('\n')
if len(dirs) > 0:
for directory in dirs:
output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ')
output.write(directory)
output.write('\n')
output.write(' COMMAND ')
output.write(gyp.common.EncodePOSIXShellList(action['action']))
output.write('\n')
output.write(' DEPENDS ')
WriteVariable(output, inputs_name)
output.write('\n')
output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write(' COMMENT ')
if 'message' in action:
output.write(action['message'])
else:
output.write(action_target_name)
output.write('\n')
output.write(' VERBATIM\n')
output.write(')\n')
# add_custom_target
output.write('add_custom_target(')
output.write(action_target_name)
output.write('\n DEPENDS ')
WriteVariable(output, outputs_name)
output.write('\n SOURCES ')
WriteVariable(output, inputs_name)
output.write('\n)\n')
extra_deps.append(action_target_name)
def NormjoinRulePathForceCMakeSource(base_path, rel_path, rule_source):
if rel_path.startswith(("${RULE_INPUT_PATH}","${RULE_INPUT_DIRNAME}")):
if any([rule_source.startswith(var) for var in FULL_PATH_VARS]):
return rel_path
return NormjoinPathForceCMakeSource(base_path, rel_path)
def WriteRules(target_name, rules, extra_sources, extra_deps,
path_to_gyp, output):
"""Write CMake for the 'rules' in the target.
Args:
target_name: the name of the CMake target being generated.
rules: the Gyp 'rules' dict for this target.
extra_sources: [(<cmake_src>, <src>)] to append with generated source files.
extra_deps: [<cmake_target>] to append with generated targets.
path_to_gyp: relative path from the CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
output: the CMakeLists.txt file object to write to.
"""
for rule in rules:
rule_name = StringToCMakeTargetName(target_name + '__' + rule['rule_name'])
inputs = rule.get('inputs', [])
inputs_name = rule_name + '__input'
SetVariableList(output, inputs_name,
[NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs])
outputs = rule['outputs']
var_outputs = []
for count, rule_source in enumerate(rule.get('rule_sources', [])):
action_name = rule_name + '_' + str(count)
rule_source_dirname, rule_source_basename = os.path.split(rule_source)
rule_source_root, rule_source_ext = os.path.splitext(rule_source_basename)
SetVariable(output, 'RULE_INPUT_PATH', rule_source)
SetVariable(output, 'RULE_INPUT_DIRNAME', rule_source_dirname)
SetVariable(output, 'RULE_INPUT_NAME', rule_source_basename)
SetVariable(output, 'RULE_INPUT_ROOT', rule_source_root)
SetVariable(output, 'RULE_INPUT_EXT', rule_source_ext)
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir)
# Create variables for the output, as 'local' variable will be unset.
these_outputs = []
for output_index, out in enumerate(outputs):
output_name = action_name + '_' + str(output_index)
SetVariable(output, output_name,
NormjoinRulePathForceCMakeSource(path_to_gyp, out,
rule_source))
if int(rule.get('process_outputs_as_sources', False)):
extra_sources.append(('${' + output_name + '}', out))
these_outputs.append('${' + output_name + '}')
var_outputs.append('${' + output_name + '}')
# add_custom_command
output.write('add_custom_command(OUTPUT\n')
for out in these_outputs:
output.write(' ')
output.write(out)
output.write('\n')
for directory in dirs:
output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ')
output.write(directory)
output.write('\n')
output.write(' COMMAND ')
output.write(gyp.common.EncodePOSIXShellList(rule['action']))
output.write('\n')
output.write(' DEPENDS ')
WriteVariable(output, inputs_name)
output.write(' ')
output.write(NormjoinPath(path_to_gyp, rule_source))
output.write('\n')
# CMAKE_CURRENT_LIST_DIR is where the CMakeLists.txt lives.
# The cwd is the current build directory.
output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write(' COMMENT ')
if 'message' in rule:
output.write(rule['message'])
else:
output.write(action_name)
output.write('\n')
output.write(' VERBATIM\n')
output.write(')\n')
UnsetVariable(output, 'RULE_INPUT_PATH')
UnsetVariable(output, 'RULE_INPUT_DIRNAME')
UnsetVariable(output, 'RULE_INPUT_NAME')
UnsetVariable(output, 'RULE_INPUT_ROOT')
UnsetVariable(output, 'RULE_INPUT_EXT')
# add_custom_target
output.write('add_custom_target(')
output.write(rule_name)
output.write(' DEPENDS\n')
for out in var_outputs:
output.write(' ')
output.write(out)
output.write('\n')
output.write('SOURCES ')
WriteVariable(output, inputs_name)
output.write('\n')
for rule_source in rule.get('rule_sources', []):
output.write(' ')
output.write(NormjoinPath(path_to_gyp, rule_source))
output.write('\n')
output.write(')\n')
extra_deps.append(rule_name)
def WriteCopies(target_name, copies, extra_deps, path_to_gyp, output):
"""Write CMake for the 'copies' in the target.
Args:
target_name: the name of the CMake target being generated.
copies: the Gyp 'copies' list for this target.
extra_deps: [<cmake_target>] to append with generated targets.
path_to_gyp: relative path from the CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
output: the CMakeLists.txt file object to write to.
"""
copy_name = target_name + '__copies'
# CMake gets upset with custom targets with OUTPUT which specify no output.
have_copies = any(copy['files'] for copy in copies)
if not have_copies:
output.write('add_custom_target(')
output.write(copy_name)
output.write(')\n')
extra_deps.append(copy_name)
return
class Copy(object):
def __init__(self, ext, command):
self.cmake_inputs = []
self.cmake_outputs = []
self.gyp_inputs = []
self.gyp_outputs = []
self.ext = ext
self.inputs_name = None
self.outputs_name = None
self.command = command
file_copy = Copy('', 'copy')
dir_copy = Copy('_dirs', 'copy_directory')
for copy in copies:
files = copy['files']
destination = copy['destination']
for src in files:
path = os.path.normpath(src)
basename = os.path.split(path)[1]
dst = os.path.join(destination, basename)
# A source with a trailing slash has an empty basename; copy it as a
# directory, otherwise as a single file.
copy = file_copy if os.path.basename(src) else dir_copy
copy.cmake_inputs.append(NormjoinPathForceCMakeSource(path_to_gyp, src))
copy.cmake_outputs.append(NormjoinPathForceCMakeSource(path_to_gyp, dst))
copy.gyp_inputs.append(src)
copy.gyp_outputs.append(dst)
for copy in (file_copy, dir_copy):
if copy.cmake_inputs:
copy.inputs_name = copy_name + '__input' + copy.ext
SetVariableList(output, copy.inputs_name, copy.cmake_inputs)
copy.outputs_name = copy_name + '__output' + copy.ext
SetVariableList(output, copy.outputs_name, copy.cmake_outputs)
# add_custom_command
output.write('add_custom_command(\n')
output.write('OUTPUT')
for copy in (file_copy, dir_copy):
if copy.outputs_name:
WriteVariable(output, copy.outputs_name, ' ')
output.write('\n')
for copy in (file_copy, dir_copy):
for src, dst in zip(copy.gyp_inputs, copy.gyp_outputs):
# 'cmake -E copy src dst' will create the 'dst' directory if needed.
output.write('COMMAND ${CMAKE_COMMAND} -E %s ' % copy.command)
output.write(src)
output.write(' ')
output.write(dst)
output.write("\n")
output.write('DEPENDS')
for copy in (file_copy, dir_copy):
if copy.inputs_name:
WriteVariable(output, copy.inputs_name, ' ')
output.write('\n')
output.write('WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write('COMMENT Copying for ')
output.write(target_name)
output.write('\n')
output.write('VERBATIM\n')
output.write(')\n')
# add_custom_target
output.write('add_custom_target(')
output.write(copy_name)
output.write('\n DEPENDS')
for copy in (file_copy, dir_copy):
if copy.outputs_name:
WriteVariable(output, copy.outputs_name, ' ')
output.write('\n SOURCES')
if file_copy.inputs_name:
WriteVariable(output, file_copy.inputs_name, ' ')
output.write('\n)\n')
extra_deps.append(copy_name)
def CreateCMakeTargetBaseName(qualified_target):
"""This is the name we would like the target to have."""
_, gyp_target_name, gyp_target_toolset = (
gyp.common.ParseQualifiedTarget(qualified_target))
cmake_target_base_name = gyp_target_name
if gyp_target_toolset and gyp_target_toolset != 'target':
cmake_target_base_name += '_' + gyp_target_toolset
return StringToCMakeTargetName(cmake_target_base_name)
def CreateCMakeTargetFullName(qualified_target):
"""An unambiguous name for the target."""
gyp_file, gyp_target_name, gyp_target_toolset = (
gyp.common.ParseQualifiedTarget(qualified_target))
cmake_target_full_name = gyp_file + ':' + gyp_target_name
if gyp_target_toolset and gyp_target_toolset != 'target':
cmake_target_full_name += '_' + gyp_target_toolset
return StringToCMakeTargetName(cmake_target_full_name)
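# Illustrative sketch, assuming the usual 'gyp_file:target#toolset' qualified
# form (paths here are hypothetical):
#
#     CreateCMakeTargetBaseName('path/to/foo.gyp:bar#host')
#     # -> 'bar_host'
#     CreateCMakeTargetFullName('path/to/foo.gyp:bar#host')
#     # -> 'path_to_foo_gyp_bar_host'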
class CMakeNamer(object):
"""Converts Gyp target names into CMake target names.
CMake requires that target names be globally unique. One way to ensure
this is to fully qualify the names of the targets. Unfortunately, this
ends up with all targets looking like "chrome_chrome_gyp_chrome" instead
of just "chrome". If this generator were only interested in building, it
would be possible to fully qualify all target names, then create
unqualified target names which depend on all qualified targets which
should have had that name. This is more or less what the 'make' generator
does with aliases. However, one goal of this generator is to create CMake
files for use with IDEs, and fully qualified names are not as user
friendly.
Since target name collision is rare, we do the above only when required.
Toolset variants are always qualified from the base, as this is required for
building. However, it also makes sense for an IDE, as it is possible for
defines to be different.
"""
def __init__(self, target_list):
self.cmake_target_base_names_conflicting = set()
cmake_target_base_names_seen = set()
for qualified_target in target_list:
cmake_target_base_name = CreateCMakeTargetBaseName(qualified_target)
if cmake_target_base_name not in cmake_target_base_names_seen:
cmake_target_base_names_seen.add(cmake_target_base_name)
else:
self.cmake_target_base_names_conflicting.add(cmake_target_base_name)
def CreateCMakeTargetName(self, qualified_target):
base_name = CreateCMakeTargetBaseName(qualified_target)
if base_name in self.cmake_target_base_names_conflicting:
return CreateCMakeTargetFullName(qualified_target)
return base_name
def WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use,
options, generator_flags, all_qualified_targets, output):
# The make generator does this always.
# TODO: It would be nice to be able to tell CMake all dependencies.
circular_libs = generator_flags.get('circular', True)
if not generator_flags.get('standalone', False):
output.write('\n#')
output.write(qualified_target)
output.write('\n')
gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target)
rel_gyp_file = gyp.common.RelativePath(gyp_file, options.toplevel_dir)
rel_gyp_dir = os.path.dirname(rel_gyp_file)
# Relative path from build dir to top dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, options.toplevel_dir)
# Relative path from build dir to gyp dir.
build_to_gyp = os.path.join(build_to_top, rel_gyp_dir)
path_from_cmakelists_to_gyp = build_to_gyp
spec = target_dicts.get(qualified_target, {})
config = spec.get('configurations', {}).get(config_to_use, {})
target_name = spec.get('target_name', '<missing target name>')
target_type = spec.get('type', '<missing target type>')
target_toolset = spec.get('toolset')
cmake_target_type = cmake_target_type_from_gyp_target_type.get(target_type)
if cmake_target_type is None:
print('Target %s has unknown target type %s, skipping.' %
(target_name, target_type))
return
SetVariable(output, 'TARGET', target_name)
SetVariable(output, 'TOOLSET', target_toolset)
cmake_target_name = namer.CreateCMakeTargetName(qualified_target)
extra_sources = []
extra_deps = []
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
WriteActions(cmake_target_name, spec['actions'], extra_sources, extra_deps,
path_from_cmakelists_to_gyp, output)
# Rules must be early like actions.
if 'rules' in spec:
WriteRules(cmake_target_name, spec['rules'], extra_sources, extra_deps,
path_from_cmakelists_to_gyp, output)
# Copies
if 'copies' in spec:
WriteCopies(cmake_target_name, spec['copies'], extra_deps,
path_from_cmakelists_to_gyp, output)
# Target and sources
srcs = spec.get('sources', [])
# Gyp separates the sheep from the goats based on file extensions.
  # A full separation is done here because of flag handling (see below).
s_sources = []
c_sources = []
cxx_sources = []
linkable_sources = []
other_sources = []
for src in srcs:
_, ext = os.path.splitext(src)
src_type = COMPILABLE_EXTENSIONS.get(ext, None)
    src_norm_path = NormjoinPath(path_from_cmakelists_to_gyp, src)
if src_type == 's':
s_sources.append(src_norm_path)
elif src_type == 'cc':
c_sources.append(src_norm_path)
elif src_type == 'cxx':
cxx_sources.append(src_norm_path)
elif Linkable(ext):
linkable_sources.append(src_norm_path)
else:
other_sources.append(src_norm_path)
for extra_source in extra_sources:
src, real_source = extra_source
_, ext = os.path.splitext(real_source)
src_type = COMPILABLE_EXTENSIONS.get(ext, None)
if src_type == 's':
s_sources.append(src)
elif src_type == 'cc':
c_sources.append(src)
elif src_type == 'cxx':
cxx_sources.append(src)
elif Linkable(ext):
linkable_sources.append(src)
else:
other_sources.append(src)
s_sources_name = None
if s_sources:
s_sources_name = cmake_target_name + '__asm_srcs'
SetVariableList(output, s_sources_name, s_sources)
c_sources_name = None
if c_sources:
c_sources_name = cmake_target_name + '__c_srcs'
SetVariableList(output, c_sources_name, c_sources)
cxx_sources_name = None
if cxx_sources:
cxx_sources_name = cmake_target_name + '__cxx_srcs'
SetVariableList(output, cxx_sources_name, cxx_sources)
linkable_sources_name = None
if linkable_sources:
linkable_sources_name = cmake_target_name + '__linkable_srcs'
SetVariableList(output, linkable_sources_name, linkable_sources)
other_sources_name = None
if other_sources:
other_sources_name = cmake_target_name + '__other_srcs'
SetVariableList(output, other_sources_name, other_sources)
# CMake gets upset when executable targets provide no sources.
# http://www.cmake.org/pipermail/cmake/2010-July/038461.html
dummy_sources_name = None
has_sources = (s_sources_name or
c_sources_name or
cxx_sources_name or
linkable_sources_name or
other_sources_name)
if target_type == 'executable' and not has_sources:
dummy_sources_name = cmake_target_name + '__dummy_srcs'
SetVariable(output, dummy_sources_name,
"${obj}.${TOOLSET}/${TARGET}/genc/dummy.c")
output.write('if(NOT EXISTS "')
WriteVariable(output, dummy_sources_name)
output.write('")\n')
output.write(' file(WRITE "')
WriteVariable(output, dummy_sources_name)
output.write('" "")\n')
output.write("endif()\n")
# CMake is opposed to setting linker directories and considers the practice
# of setting linker directories dangerous. Instead, it favors the use of
# find_library and passing absolute paths to target_link_libraries.
# However, CMake does provide the command link_directories, which adds
# link directories to targets defined after it is called.
# As a result, link_directories must come before the target definition.
# CMake unfortunately has no means of removing entries from LINK_DIRECTORIES.
library_dirs = config.get('library_dirs')
if library_dirs is not None:
output.write('link_directories(')
for library_dir in library_dirs:
output.write(' ')
output.write(NormjoinPath(path_from_cmakelists_to_gyp, library_dir))
output.write('\n')
output.write(')\n')
output.write(cmake_target_type.command)
output.write('(')
output.write(cmake_target_name)
if cmake_target_type.modifier is not None:
output.write(' ')
output.write(cmake_target_type.modifier)
if s_sources_name:
WriteVariable(output, s_sources_name, ' ')
if c_sources_name:
WriteVariable(output, c_sources_name, ' ')
if cxx_sources_name:
WriteVariable(output, cxx_sources_name, ' ')
if linkable_sources_name:
WriteVariable(output, linkable_sources_name, ' ')
if other_sources_name:
WriteVariable(output, other_sources_name, ' ')
if dummy_sources_name:
WriteVariable(output, dummy_sources_name, ' ')
output.write(')\n')
# Let CMake know if the 'all' target should depend on this target.
exclude_from_all = ('TRUE' if qualified_target not in all_qualified_targets
else 'FALSE')
SetTargetProperty(output, cmake_target_name,
'EXCLUDE_FROM_ALL', exclude_from_all)
for extra_target_name in extra_deps:
SetTargetProperty(output, extra_target_name,
'EXCLUDE_FROM_ALL', exclude_from_all)
# Output name and location.
if target_type != 'none':
# Link as 'C' if there are no other files
if not c_sources and not cxx_sources:
SetTargetProperty(output, cmake_target_name, 'LINKER_LANGUAGE', ['C'])
# Mark uncompiled sources as uncompiled.
if other_sources_name:
output.write('set_source_files_properties(')
WriteVariable(output, other_sources_name, '')
output.write(' PROPERTIES HEADER_FILE_ONLY "TRUE")\n')
# Mark object sources as linkable.
if linkable_sources_name:
output.write('set_source_files_properties(')
      WriteVariable(output, linkable_sources_name, '')
output.write(' PROPERTIES EXTERNAL_OBJECT "TRUE")\n')
# Output directory
target_output_directory = spec.get('product_dir')
if target_output_directory is None:
if target_type in ('executable', 'loadable_module'):
target_output_directory = generator_default_variables['PRODUCT_DIR']
elif target_type == 'shared_library':
target_output_directory = '${builddir}/lib.${TOOLSET}'
elif spec.get('standalone_static_library', False):
target_output_directory = generator_default_variables['PRODUCT_DIR']
else:
base_path = gyp.common.RelativePath(os.path.dirname(gyp_file),
options.toplevel_dir)
target_output_directory = '${obj}.${TOOLSET}'
target_output_directory = (
os.path.join(target_output_directory, base_path))
cmake_target_output_directory = NormjoinPathForceCMakeSource(
path_from_cmakelists_to_gyp,
target_output_directory)
SetTargetProperty(output,
cmake_target_name,
cmake_target_type.property_modifier + '_OUTPUT_DIRECTORY',
cmake_target_output_directory)
# Output name
default_product_prefix = ''
default_product_name = target_name
default_product_ext = ''
if target_type == 'static_library':
static_library_prefix = generator_default_variables['STATIC_LIB_PREFIX']
default_product_name = RemovePrefix(default_product_name,
static_library_prefix)
default_product_prefix = static_library_prefix
default_product_ext = generator_default_variables['STATIC_LIB_SUFFIX']
elif target_type in ('loadable_module', 'shared_library'):
shared_library_prefix = generator_default_variables['SHARED_LIB_PREFIX']
default_product_name = RemovePrefix(default_product_name,
shared_library_prefix)
default_product_prefix = shared_library_prefix
default_product_ext = generator_default_variables['SHARED_LIB_SUFFIX']
elif target_type != 'executable':
      print('ERROR: What output file should be generated? type: %s, target: %s'
            % (target_type, target_name))
product_prefix = spec.get('product_prefix', default_product_prefix)
product_name = spec.get('product_name', default_product_name)
product_ext = spec.get('product_extension')
if product_ext:
product_ext = '.' + product_ext
else:
product_ext = default_product_ext
SetTargetProperty(output, cmake_target_name, 'PREFIX', product_prefix)
SetTargetProperty(output, cmake_target_name,
cmake_target_type.property_modifier + '_OUTPUT_NAME',
product_name)
SetTargetProperty(output, cmake_target_name, 'SUFFIX', product_ext)
# Make the output of this target referenceable as a source.
cmake_target_output_basename = product_prefix + product_name + product_ext
cmake_target_output = os.path.join(cmake_target_output_directory,
cmake_target_output_basename)
SetFileProperty(output, cmake_target_output, 'GENERATED', ['TRUE'], '')
# Includes
includes = config.get('include_dirs')
if includes:
# This (target include directories) is what requires CMake 2.8.8
includes_name = cmake_target_name + '__include_dirs'
SetVariableList(output, includes_name,
[NormjoinPathForceCMakeSource(path_from_cmakelists_to_gyp, include)
for include in includes])
output.write('set_property(TARGET ')
output.write(cmake_target_name)
output.write(' APPEND PROPERTY INCLUDE_DIRECTORIES ')
WriteVariable(output, includes_name, '')
output.write(')\n')
# Defines
defines = config.get('defines')
if defines is not None:
SetTargetProperty(output,
cmake_target_name,
'COMPILE_DEFINITIONS',
defines,
';')
# Compile Flags - http://www.cmake.org/Bug/view.php?id=6493
# CMake currently does not have target C and CXX flags.
# So, instead of doing...
# cflags_c = config.get('cflags_c')
# if cflags_c is not None:
# SetTargetProperty(output, cmake_target_name,
# 'C_COMPILE_FLAGS', cflags_c, ' ')
# cflags_cc = config.get('cflags_cc')
# if cflags_cc is not None:
# SetTargetProperty(output, cmake_target_name,
# 'CXX_COMPILE_FLAGS', cflags_cc, ' ')
# Instead we must...
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cxx = config.get('cflags_cc', [])
if (not cflags_c or not c_sources) and (not cflags_cxx or not cxx_sources):
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', cflags, ' ')
elif c_sources and not (s_sources or cxx_sources):
flags = []
flags.extend(cflags)
flags.extend(cflags_c)
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ')
elif cxx_sources and not (s_sources or c_sources):
flags = []
flags.extend(cflags)
flags.extend(cflags_cxx)
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ')
else:
# TODO: This is broken, one cannot generally set properties on files,
# as other targets may require different properties on the same files.
if s_sources and cflags:
SetFilesProperty(output, s_sources_name, 'COMPILE_FLAGS', cflags, ' ')
if c_sources and (cflags or cflags_c):
flags = []
flags.extend(cflags)
flags.extend(cflags_c)
SetFilesProperty(output, c_sources_name, 'COMPILE_FLAGS', flags, ' ')
if cxx_sources and (cflags or cflags_cxx):
flags = []
flags.extend(cflags)
flags.extend(cflags_cxx)
SetFilesProperty(output, cxx_sources_name, 'COMPILE_FLAGS', flags, ' ')
# Linker flags
ldflags = config.get('ldflags')
if ldflags is not None:
SetTargetProperty(output, cmake_target_name, 'LINK_FLAGS', ldflags, ' ')
# Note on Dependencies and Libraries:
# CMake wants to handle link order, resolving the link line up front.
# Gyp does not retain or enforce specifying enough information to do so.
# So do as other gyp generators and use --start-group and --end-group.
# Give CMake as little information as possible so that it doesn't mess it up.
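  # For example, with 'circular' enabled and more than one static dependency,
  # the emitted link line comes out roughly as (illustrative names):
  #   target_link_libraries(foo
  #   -Wl,--start-group
  #    bar
  #    baz
  #   -Wl,--end-group
  #   )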
# Dependencies
rawDeps = spec.get('dependencies', [])
static_deps = []
shared_deps = []
other_deps = []
for rawDep in rawDeps:
dep_cmake_name = namer.CreateCMakeTargetName(rawDep)
dep_spec = target_dicts.get(rawDep, {})
dep_target_type = dep_spec.get('type', None)
if dep_target_type == 'static_library':
static_deps.append(dep_cmake_name)
elif dep_target_type == 'shared_library':
shared_deps.append(dep_cmake_name)
else:
other_deps.append(dep_cmake_name)
# ensure all external dependencies are complete before internal dependencies
# extra_deps currently only depend on their own deps, so otherwise run early
if static_deps or shared_deps or other_deps:
for extra_dep in extra_deps:
output.write('add_dependencies(')
output.write(extra_dep)
output.write('\n')
for deps in (static_deps, shared_deps, other_deps):
for dep in gyp.common.uniquer(deps):
output.write(' ')
output.write(dep)
output.write('\n')
output.write(')\n')
linkable = target_type in ('executable', 'loadable_module', 'shared_library')
other_deps.extend(extra_deps)
if other_deps or (not linkable and (static_deps or shared_deps)):
output.write('add_dependencies(')
output.write(cmake_target_name)
output.write('\n')
for dep in gyp.common.uniquer(other_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if not linkable:
for deps in (static_deps, shared_deps):
for lib_dep in gyp.common.uniquer(deps):
output.write(' ')
output.write(lib_dep)
output.write('\n')
output.write(')\n')
# Libraries
if linkable:
external_libs = [lib for lib in spec.get('libraries', []) if len(lib) > 0]
if external_libs or static_deps or shared_deps:
output.write('target_link_libraries(')
output.write(cmake_target_name)
output.write('\n')
if static_deps:
write_group = circular_libs and len(static_deps) > 1
if write_group:
output.write('-Wl,--start-group\n')
for dep in gyp.common.uniquer(static_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if write_group:
output.write('-Wl,--end-group\n')
if shared_deps:
for dep in gyp.common.uniquer(shared_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if external_libs:
for lib in gyp.common.uniquer(external_libs):
output.write(' ')
output.write(lib)
output.write('\n')
output.write(')\n')
UnsetVariable(output, 'TOOLSET')
UnsetVariable(output, 'TARGET')
def GenerateOutputForConfig(target_list, target_dicts, data,
params, config_to_use):
options = params['options']
generator_flags = params['generator_flags']
# generator_dir: relative path from pwd to where make puts build files.
  # Makes migrating from make to cmake easier; cmake doesn't put anything here.
# Each Gyp configuration creates a different CMakeLists.txt file
# to avoid incompatibilities between Gyp and CMake configurations.
generator_dir = os.path.relpath(options.generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(os.path.join(generator_dir,
output_dir,
config_to_use))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
output_file = os.path.join(toplevel_build, 'CMakeLists.txt')
gyp.common.EnsureDirExists(output_file)
output = open(output_file, 'w')
output.write('cmake_minimum_required(VERSION 2.8.8 FATAL_ERROR)\n')
output.write('cmake_policy(VERSION 2.8.8)\n')
gyp_file, project_target, _ = gyp.common.ParseQualifiedTarget(target_list[-1])
output.write('project(')
output.write(project_target)
output.write(')\n')
SetVariable(output, 'configuration', config_to_use)
ar = None
cc = None
cxx = None
make_global_settings = data[gyp_file].get('make_global_settings', [])
build_to_top = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
for key, value in make_global_settings:
if key == 'AR':
ar = os.path.join(build_to_top, value)
if key == 'CC':
cc = os.path.join(build_to_top, value)
if key == 'CXX':
cxx = os.path.join(build_to_top, value)
ar = gyp.common.GetEnvironFallback(['AR_target', 'AR'], ar)
cc = gyp.common.GetEnvironFallback(['CC_target', 'CC'], cc)
cxx = gyp.common.GetEnvironFallback(['CXX_target', 'CXX'], cxx)
if ar:
SetVariable(output, 'CMAKE_AR', ar)
if cc:
SetVariable(output, 'CMAKE_C_COMPILER', cc)
if cxx:
SetVariable(output, 'CMAKE_CXX_COMPILER', cxx)
# The following appears to be as-yet undocumented.
# http://public.kitware.com/Bug/view.php?id=8392
output.write('enable_language(ASM)\n')
# ASM-ATT does not support .S files.
# output.write('enable_language(ASM-ATT)\n')
if cc:
SetVariable(output, 'CMAKE_ASM_COMPILER', cc)
SetVariable(output, 'builddir', '${CMAKE_CURRENT_BINARY_DIR}')
SetVariable(output, 'obj', '${builddir}/obj')
output.write('\n')
# TODO: Undocumented/unsupported (the CMake Java generator depends on it).
# CMake by default names the object resulting from foo.c to be foo.c.o.
# Gyp traditionally names the object resulting from foo.c foo.o.
# This should be irrelevant, but some targets extract .o files from .a
# and depend on the name of the extracted .o files.
output.write('set(CMAKE_C_OUTPUT_EXTENSION_REPLACE 1)\n')
output.write('set(CMAKE_CXX_OUTPUT_EXTENSION_REPLACE 1)\n')
output.write('\n')
# Force ninja to use rsp files. Otherwise link and ar lines can get too long,
# resulting in 'Argument list too long' errors.
output.write('set(CMAKE_NINJA_FORCE_RESPONSE_FILE 1)\n')
output.write('\n')
namer = CMakeNamer(target_list)
# The list of targets upon which the 'all' target should depend.
  # CMake has its own implicit 'all' target; one is not created explicitly.
all_qualified_targets = set()
for build_file in params['build_files']:
for qualified_target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_qualified_targets.add(qualified_target)
for qualified_target in target_list:
WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use,
options, generator_flags, all_qualified_targets, output)
output.close()
def PerformBuild(data, configurations, params):
options = params['options']
generator_flags = params['generator_flags']
# generator_dir: relative path from pwd to where make puts build files.
  # Makes migrating from make to cmake easier; cmake doesn't put anything here.
generator_dir = os.path.relpath(options.generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
for config_name in configurations:
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(os.path.join(generator_dir,
output_dir,
config_name))
arguments = ['cmake', '-G', 'Ninja']
print 'Generating [%s]: %s' % (config_name, arguments)
subprocess.check_call(arguments, cwd=build_dir)
arguments = ['ninja', '-C', build_dir]
print 'Building [%s]: %s' % (config_name, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
target_list, target_dicts, data, params, config_name = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
user_config = params.get('generator_flags', {}).get('config', None)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data,
params, user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append((target_list, target_dicts, data,
params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data,
params, config_name)
|
zmwangx/you-get
|
refs/heads/develop
|
src/you_get/util/__init__.py
|
12133432
| |
agepoly/delegues
|
refs/heads/develop
|
djangobb/djangobb_forum/management/commands/__init__.py
|
12133432
| |
magic0704/neutron
|
refs/heads/master
|
neutron/tests/unit/agent/l3/__init__.py
|
12133432
| |
jdar/phantomjs-modified
|
refs/heads/master
|
src/qt/qtwebkit/Tools/QueueStatusServer/handlers/updatesvnrevision.py
|
143
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.ext import webapp, db
from google.appengine.ext.webapp import template
import handlers
import model
class UpdateSVNRevision(handlers.UpdateBase):
def get(self):
self.response.out.write(template.render("templates/updatesvnrevision.html", None))
def post(self):
svn_revision_number = self._int_from_request("number")
svn_revisions = model.SVNRevision.all().filter('number =', svn_revision_number).order('-date').fetch(1)
svn_revision = None
if svn_revisions:
svn_revision = svn_revisions[0]
else:
svn_revision = model.SVNRevision()
svn_revision.number = svn_revision_number
svn_revision.broken_bots.append(self.request.get("broken_bot"))
svn_revision.put()
self.response.out.write(svn_revision.key().id())
|
arruda/pyfuzzy
|
refs/heads/master
|
fuzzy/set/Triangle.py
|
1
|
# -*- coding: iso-8859-1 -*-
#
# Copyright (C) 2009 Rene Liebscher
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>.
#
__revision__ = "$Id: Triangle.py,v 1.13 2009/08/07 07:19:19 rliebscher Exp $"
from fuzzy.set.Polygon import Polygon
from fuzzy.utils import prop
class Triangle(Polygon):
r"""Realize a triangle-shaped fuzzy set::
______ y_max
A
/|\
/ | \
/ | \
_/ | \_ y_min
| m |
| | |
alpha|beta
See also U{http://pyfuzzy.sourceforge.net/test/set/Triangle.png}
"""
def __init__(self,m=0.0,alpha=1.0,beta=1.0,y_max=1.0,y_min=0.0):
"""
Initialize a triangle-shaped fuzzy set.
@param y_max: y-value at top of the triangle (1.0)
@param y_min: y-value outside the triangle (0.0)
@param m: x-value of top of triangle (0.0)
@param alpha: distance of left corner to m (1.0)
@param beta: distance of right corner to m (1.0)
"""
super(Triangle, self).__init__()
self._y_max = y_max
self._y_min = y_min
self._m = m
self._alpha = alpha
self._beta = beta
self._update() # update polygon
@prop
def y_max():
"""y-value at top of the triangle
@type: float"""
def fget(self):
return self._y_max
def fset(self,value):
self._y_max = value
self._update()
return locals()
@prop
def y_min():
"""y-value outside the triangle
@type: float"""
def fget(self):
return self._y_min
def fset(self,value):
self._y_min = value
self._update()
return locals()
@prop
def m():
"""x-value of top of triangle
@type: float"""
def fget(self):
return self._m
def fset(self,value):
self._m = value
self._update()
return locals()
@prop
def alpha():
"""distance of left corner to m
@type: float"""
def fget(self):
return self._alpha
def fset(self,value):
self._alpha = value
self._update()
return locals()
@prop
def beta():
"""distance of right corner to m
@type: float"""
def fget(self):
return self._beta
def fset(self,value):
self._beta = value
self._update()
return locals()
def _update(self):
"""update polygon"""
p = super(Triangle, self)
p.clear()
p.add(self._m-self._alpha,self._y_min)
p.add(self._m,self._y_max)
p.add(self._m+self._beta,self._y_min)
def add(self,x,y,where=Polygon.END):
"""Don't let anyone destroy our triangle."""
raise Exception()
def remove(self,x,where=Polygon.END):
"""Don't let anyone destroy our triangle."""
raise Exception()
def clear(self):
"""Don't let anyone destroy our triangle."""
raise Exception()
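# A minimal usage sketch (values are illustrative; membership evaluation via
# __call__ is assumed to come from the Polygon/Set base classes):
#   t = Triangle(m=0.0, alpha=1.0, beta=1.0)
#   t(0.0)  # -> 1.0, the peak at m
#   t(0.5)  # -> 0.5, halfway down the right slope
#   t(2.0)  # -> 0.0, outside the triangle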
|
vdods/heisenberg
|
refs/heads/master
|
heisenberg/plot/__init__.py
|
1
|
import heisenberg.library.monte_carlo
import heisenberg.library.orbit_plot
import heisenberg.library.shooting_method_objective
import heisenberg.util
import os
import sys
import traceback
import vorpy.pickle
subprogram_description = 'Plots an integral curve of the system using given initial condition, optionally running an optimization method to find a nearby curve that closes back up on itself.'
def plot (dynamics_context, options, *, rng):
if not os.path.exists(options.output_dir):
os.makedirs(options.output_dir, exist_ok=True)
# Plot given curve
qp_0 = options.qp_0
smo_0 = heisenberg.library.shooting_method_objective.ShootingMethodObjective(dynamics_context=dynamics_context, preimage_qp_0=options.initial_preimage, qp_0=options.qp_0, t_max=options.max_time, t_delta=options.dt)
print('smo_0.objective() = {0}'.format(smo_0.objective()))
# Construct curve_description_v
curve_description_v = []
if not options.disable_plot_initial:
curve_description_v += ['initial curve']
if options.optimize_initial:
curve_description_v += ['optimized curve']
op = heisenberg.library.orbit_plot.OrbitPlot(curve_description_v=curve_description_v, quantity_to_plot_v=options.quantity_to_plot_v, size=options.plot_size)
if options.optimize_initial:
if options.initial_preimage is not None:
X_0 = options.initial_preimage
embedding = dynamics_context.embedding(N=options.embedding_dimension, sheet_index=options.embedding_solution_sheet_index)
elif options.initial is not None:
X_0 = options.qp_0
embedding = None
else:
assert options.initial_k_fold is not None
X_0 = options.initial_k_fold
embedding = dynamics_context.embedding(N=5, sheet_index=1)
disable_salvage = True
optimizer = heisenberg.library.monte_carlo.MonteCarlo(
obj=lambda qp_0:heisenberg.library.shooting_method_objective.evaluate_shooting_method_objective(dynamics_context, qp_0, options.max_time, options.dt, disable_salvage),
initial_parameters=X_0,
inner_radius=options.optimization_annulus_bound_v[0],
outer_radius=options.optimization_annulus_bound_v[1],
rng_seed=options.seed, # TODO: Make a single RNG that's used everywhere in the program.
embedding=embedding
)
try:
actual_iteration_count = 0
for i in range(options.optimization_iterations):
optimizer.compute_next_step()
actual_iteration_count += 1
print('i = {0}, obj = {1:.17e}'.format(i, optimizer.obj_history_v[-1]))
except KeyboardInterrupt:
print('got KeyboardInterrupt -- halting optimization, but will still plot current results')
except AssertionError as e:
print('got AssertionError -- halting optimization, but will plot last good results; exception was {0}'.format(e))
print('stack:')
ex_type,ex,tb = sys.exc_info()
traceback.print_tb(tb)
qp_opt = optimizer.embedded_parameter_history_v[-1]
smo_opt = heisenberg.library.shooting_method_objective.ShootingMethodObjective(dynamics_context=dynamics_context, preimage_qp_0=optimizer.parameter_history_v[-1], qp_0=qp_opt, t_max=options.max_time, t_delta=options.dt)
print('qp_opt = {0}'.format(qp_opt))
if embedding is not None:
print('qp_opt embedding preimage; X_0 = {0}'.format(optimizer.parameter_history_v[-1]))
op.plot_curve(curve_description='optimized curve', smo=smo_opt, objective_history_v=optimizer.obj_history_v, cut_off_curve_tail=options.cut_off_optimized_curve_tail, disable_plot_decoration=options.disable_plot_decoration, use_terse_titles=options.use_terse_plot_titles)
qp = qp_opt
smo = smo_opt
else:
qp = qp_0
smo = smo_0
base_filename = os.path.join(
options.output_dir,
heisenberg.util.construct_base_filename(
symmetry_order_estimate=smo.symmetry_order_estimate(),
symmetry_class_estimate=smo.symmetry_class_estimate(),
obj=smo.objective(),
t_delta=options.dt,
t_max=options.max_time,
initial_condition=qp,
sheet_index=options.embedding_solution_sheet_index,
t_min=smo.t_min()
)
)
if not options.disable_plot_initial:
print('plotting initial curve')
op.plot_curve(curve_description='initial curve', smo=smo_0, cut_off_curve_tail=options.cut_off_initial_curve_tail, disable_plot_decoration=options.disable_plot_decoration, use_terse_titles=options.use_terse_plot_titles)
else:
print('NOT plotting initial curve')
op.savefig_and_clear(filename=base_filename+'.'+options.plot_type)
# Put together the data to pickle
pickle_data = smo.data_to_pickle()
pickle_data['full_commandline'] = heisenberg.util.reconstruct_full_commandline(executable=sys.executable, argv=sys.argv)
pickle_data['options'] = vars(options) # vars ensures it's a dict, and not a stupid optparse.Values object.
vorpy.pickle.try_to_pickle(data=pickle_data, pickle_filename=base_filename+'.pickle')
# Also create a human-readable summary of the pickle data.
heisenberg.util.write_human_readable_summary(data=pickle_data, filename=base_filename+'.summary')
|
Deep-Abstract/PyMVC
|
refs/heads/master
|
daoDetails.py
|
2
|
dbargs = {'user': 'your_username',
'password': 'your_passwd',
'db': 'your_db_name',
'host': 'localhost'}
|
cloudbase/maas
|
refs/heads/master
|
src/maastesting/tests/test_scenarios.py
|
1
|
# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for `maastesting.scenarios`."""
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
str = None
__metaclass__ = type
__all__ = []
import unittest
from maastesting.scenarios import WithScenarios
from maastesting.testcase import MAASTestCase
class TestWithScenarios(MAASTestCase):
def test_scenarios_applied(self):
# Scenarios are applied correctly when a test is called via __call__()
# instead of run().
events = []
class Test(WithScenarios, unittest.TestCase):
scenarios = [
("one", dict(token="one")),
("two", dict(token="two")),
]
def test(self):
events.append(self.token)
test = Test("test")
test.__call__()
self.assertEqual(["one", "two"], events)
def test_scenarios_applied_by_call(self):
# Scenarios are applied by __call__() when it is called first, and not
# by run().
events = []
class Test(WithScenarios, unittest.TestCase):
scenarios = [
("one", dict(token="one")),
("two", dict(token="two")),
]
def test(self):
events.append(self.token)
def run(self, result=None):
# Call-up right past WithScenarios.run() to show that it is
# not responsible for applying scenarios, and __call__() is.
super(WithScenarios, self).run(result)
test = Test("test")
test.__call__()
self.assertEqual(["one", "two"], events)
|
petecummings/django
|
refs/heads/master
|
tests/utils_tests/test_glob.py
|
331
|
from __future__ import unicode_literals
from django.test import SimpleTestCase
from django.utils.glob import glob_escape
class TestUtilsGlob(SimpleTestCase):
def test_glob_escape(self):
filename = '/my/file?/name[with special chars*'
expected = '/my/file[?]/name[[]with special chars[*]'
filename_b = b'/my/file?/name[with special chars*'
expected_b = b'/my/file[?]/name[[]with special chars[*]'
self.assertEqual(glob_escape(filename), expected)
self.assertEqual(glob_escape(filename_b), expected_b)
|
chouseknecht/ansible
|
refs/heads/devel
|
lib/ansible/modules/clustering/consul_kv.py
|
19
|
#!/usr/bin/python
#
# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
# (c) 2018 Genome Research Ltd.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: consul_kv
short_description: Manipulate entries in the key/value store of a consul cluster
description:
- Allows the retrieval, addition, modification and deletion of key/value entries in a
consul cluster via the agent. The entire contents of the record, including
the indices, flags and session are returned as C(value).
    - If the C(key) represents a prefix, note that when a value is removed the existing
      value, if any, is returned as part of the results.
- See http://www.consul.io/docs/agent/http.html#kv for more details.
requirements:
- python-consul
- requests
version_added: "2.0"
author:
- Steve Gargan (@sgargan)
- Colin Nolan (@colin-nolan)
options:
state:
description:
- The action to take with the supplied key and value. If the state is 'present' and `value` is set, the key
contents will be set to the value supplied and `changed` will be set to `true` only if the value was
different to the current contents. If the state is 'present' and `value` is not set, the existing value
        associated to the key will be returned. The state 'absent' will remove the key/value pair;
        again, 'changed' will be set to true only if the key actually existed
        prior to the removal. An attempt can be made to obtain or free the
        lock associated with a key/value pair with the states 'acquire' or
        'release' respectively. A valid session must be supplied to make the
        attempt; 'changed' will be true if the attempt is successful, false
        otherwise.
choices: [ absent, acquire, present, release ]
default: present
key:
description:
- The key at which the value should be stored.
type: str
required: yes
value:
description:
      - The value to be associated with the given key; required if C(state)
        is C(present).
type: str
required: yes
recurse:
description:
- If the key represents a prefix, each entry with the prefix can be
retrieved by setting this to C(yes).
type: bool
default: 'no'
retrieve:
description:
- If the I(state) is C(present) and I(value) is set, perform a
read after setting the value and return this value.
default: True
type: bool
session:
description:
- The session that should be used to acquire or release a lock
associated with a key/value pair.
type: str
token:
description:
- The token key identifying an ACL rule set that controls access to
        the key value pair.
type: str
cas:
description:
- Used when acquiring a lock with a session. If the C(cas) is C(0), then
Consul will only put the key if it does not already exist. If the
C(cas) value is non-zero, then the key is only set if the index matches
the ModifyIndex of that key.
type: str
flags:
description:
- Opaque positive integer value that can be passed when setting a value.
type: str
host:
description:
- Host of the consul agent.
type: str
default: localhost
port:
description:
- The port on which the consul agent is running.
type: int
default: 8500
scheme:
description:
- The protocol scheme on which the consul agent is running.
type: str
default: http
version_added: "2.1"
validate_certs:
description:
- Whether to verify the tls certificate of the consul agent.
type: bool
default: 'yes'
version_added: "2.1"
"""
EXAMPLES = '''
# If the key does not exist, the value associated to the "data" property in `retrieved_key` will be `None`
# If the key value is empty string, `retrieved_key["data"]["Value"]` will be `None`
- name: retrieve a value from the key/value store
consul_kv:
key: somekey
register: retrieved_key
- name: Add or update the value associated with a key in the key/value store
consul_kv:
key: somekey
value: somevalue
- name: Remove a key from the store
consul_kv:
key: somekey
state: absent
- name: Add a node to an arbitrary group via consul inventory (see consul.ini)
consul_kv:
key: ansible/groups/dc1/somenode
value: top_secret
- name: Register a key/value pair with an associated session
consul_kv:
key: stg/node/server_birthday
value: 20160509
session: "{{ sessionid }}"
state: acquire
'''
from ansible.module_utils._text import to_text
try:
import consul
from requests.exceptions import ConnectionError
python_consul_installed = True
except ImportError:
python_consul_installed = False
from ansible.module_utils.basic import AnsibleModule
# Note: although the python-consul documentation implies that using a key with a value of `None` with `put` has a
# special meaning (https://python-consul.readthedocs.io/en/latest/#consul-kv), if not set in the subsequently API call,
# the value just defaults to an empty string (https://www.consul.io/api/kv.html#create-update-key)
NOT_SET = None
def _has_value_changed(consul_client, key, target_value):
"""
Uses the given Consul client to determine if the value associated to the given key is different to the given target
value.
:param consul_client: Consul connected client
:param key: key in Consul
:param target_value: value to be associated to the key
:return: tuple where the first element is the value of the "X-Consul-Index" header and the second is `True` if the
value has changed (i.e. the stored value is not the target value)
"""
index, existing = consul_client.kv.get(key)
if not existing:
return index, True
try:
changed = to_text(existing['Value'], errors='surrogate_or_strict') != target_value
return index, changed
except UnicodeError:
# Existing value was not decodable but all values we set are valid utf-8
return index, True
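# Illustrative behaviour (assuming a connected client and the key 'somekey'
# currently storing 'somevalue'):
#   _has_value_changed(consul_client, 'somekey', 'somevalue')  # (index, False)
#   _has_value_changed(consul_client, 'somekey', 'other')      # (index, True)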
def execute(module):
state = module.params.get('state')
if state == 'acquire' or state == 'release':
lock(module, state)
elif state == 'present':
if module.params.get('value') is NOT_SET:
get_value(module)
else:
set_value(module)
elif state == 'absent':
remove_value(module)
else:
        module.fail_json(msg="Unsupported state: %s" % (state, ))
def lock(module, state):
consul_api = get_consul_api(module)
session = module.params.get('session')
key = module.params.get('key')
value = module.params.get('value')
if not session:
        module.fail_json(
            msg='%s of lock for %s requested but no session supplied' %
            (state, key))
index, changed = _has_value_changed(consul_api, key, value)
if changed and not module.check_mode:
if state == 'acquire':
changed = consul_api.kv.put(key, value,
cas=module.params.get('cas'),
acquire=session,
flags=module.params.get('flags'))
else:
changed = consul_api.kv.put(key, value,
cas=module.params.get('cas'),
release=session,
flags=module.params.get('flags'))
module.exit_json(changed=changed,
index=index,
key=key)
def get_value(module):
consul_api = get_consul_api(module)
key = module.params.get('key')
index, existing_value = consul_api.kv.get(key, recurse=module.params.get('recurse'))
module.exit_json(changed=False, index=index, data=existing_value)
def set_value(module):
consul_api = get_consul_api(module)
key = module.params.get('key')
value = module.params.get('value')
if value is NOT_SET:
raise AssertionError('Cannot set value of "%s" to `NOT_SET`' % key)
index, changed = _has_value_changed(consul_api, key, value)
if changed and not module.check_mode:
changed = consul_api.kv.put(key, value,
cas=module.params.get('cas'),
flags=module.params.get('flags'))
stored = None
if module.params.get('retrieve'):
index, stored = consul_api.kv.get(key)
module.exit_json(changed=changed,
index=index,
key=key,
data=stored)
def remove_value(module):
''' remove the value associated with the given key. if the recurse parameter
is set then any key prefixed with the given key will be removed. '''
consul_api = get_consul_api(module)
key = module.params.get('key')
index, existing = consul_api.kv.get(
key, recurse=module.params.get('recurse'))
changed = existing is not None
if changed and not module.check_mode:
consul_api.kv.delete(key, module.params.get('recurse'))
module.exit_json(changed=changed,
index=index,
key=key,
data=existing)
def get_consul_api(module, token=None):
return consul.Consul(host=module.params.get('host'),
port=module.params.get('port'),
scheme=module.params.get('scheme'),
verify=module.params.get('validate_certs'),
token=module.params.get('token'))
def test_dependencies(module):
if not python_consul_installed:
module.fail_json(msg="python-consul required for this module. "
"see https://python-consul.readthedocs.io/en/latest/#installation")
def main():
module = AnsibleModule(
argument_spec=dict(
cas=dict(type='str'),
flags=dict(type='str'),
key=dict(type='str', required=True),
host=dict(type='str', default='localhost'),
scheme=dict(type='str', default='http'),
validate_certs=dict(type='bool', default=True),
port=dict(type='int', default=8500),
recurse=dict(type='bool'),
retrieve=dict(type='bool', default=True),
state=dict(type='str', default='present', choices=['absent', 'acquire', 'present', 'release']),
token=dict(type='str', no_log=True),
value=dict(type='str', default=NOT_SET),
session=dict(type='str'),
),
supports_check_mode=True
)
test_dependencies(module)
try:
execute(module)
except ConnectionError as e:
module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
module.params.get('host'), module.params.get('port'), e))
except Exception as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
kewu1992/test-infra
|
refs/heads/master
|
gubernator/third_party/defusedxml/xmlrpc.py
|
53
|
# defusedxml
#
# Copyright (c) 2013 by Christian Heimes <christian@python.org>
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
"""Defused xmlrpclib
Also defuses gzip bomb
"""
from __future__ import print_function, absolute_import
import io
from .common import (DTDForbidden, EntitiesForbidden,
ExternalReferenceForbidden, PY3, PY31, PY26)
if PY3:
__origin__ = "xmlrpc.client"
from xmlrpc.client import ExpatParser
from xmlrpc import client as xmlrpc_client
from xmlrpc import server as xmlrpc_server
if not PY31:
from xmlrpc.client import gzip_decode as _orig_gzip_decode
from xmlrpc.client import GzipDecodedResponse as _OrigGzipDecodedResponse
else:
__origin__ = "xmlrpclib"
from xmlrpclib import ExpatParser
import xmlrpclib as xmlrpc_client
xmlrpc_server = None
if not PY26:
from xmlrpclib import gzip_decode as _orig_gzip_decode
from xmlrpclib import GzipDecodedResponse as _OrigGzipDecodedResponse
try:
import gzip
except ImportError:
gzip = None
# Limit maximum request size to prevent resource exhaustion DoS
# Also used to limit maximum amount of gzip decoded data in order to prevent
# decompression bombs
# A value of -1 or smaller disables the limit
MAX_DATA = 30 * 1024 * 1024 # 30 MB
def defused_gzip_decode(data, limit=None):
"""gzip encoded data -> unencoded data
Decode data using the gzip content encoding as described in RFC 1952
"""
if not gzip:
raise NotImplementedError
if limit is None:
limit = MAX_DATA
f = io.BytesIO(data)
gzf = gzip.GzipFile(mode="rb", fileobj=f)
try:
if limit < 0: # no limit
decoded = gzf.read()
else:
decoded = gzf.read(limit + 1)
except IOError:
raise ValueError("invalid data")
f.close()
gzf.close()
if limit >= 0 and len(decoded) > limit:
raise ValueError("max gzipped payload length exceeded")
return decoded
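# Usage sketch (illustrative): oversized payloads raise ValueError instead of
# exhausting memory.
#   decoded = defused_gzip_decode(gzipped_bytes)            # default 30 MB cap
#   decoded = defused_gzip_decode(gzipped_bytes, limit=-1)  # no limit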
class DefusedGzipDecodedResponse(gzip.GzipFile if gzip else object):
"""a file-like object to decode a response encoded with the gzip
method, as described in RFC 1952.
"""
def __init__(self, response, limit=None):
        # response doesn't support tell() and read(), required by GzipFile
if not gzip:
raise NotImplementedError
self.limit = limit = limit if limit is not None else MAX_DATA
if limit < 0: # no limit
data = response.read()
self.readlength = None
else:
data = response.read(limit + 1)
self.readlength = 0
if limit >= 0 and len(data) > limit:
raise ValueError("max payload length exceeded")
self.stringio = io.BytesIO(data)
gzip.GzipFile.__init__(self, mode="rb", fileobj=self.stringio)
def read(self, n):
if self.limit >= 0:
left = self.limit - self.readlength
n = min(n, left + 1)
data = gzip.GzipFile.read(self, n)
self.readlength += len(data)
if self.readlength > self.limit:
raise ValueError("max payload length exceeded")
return data
else:
return gzip.GzipFile.read(self, n)
def close(self):
gzip.GzipFile.close(self)
self.stringio.close()
class DefusedExpatParser(ExpatParser):
def __init__(self, target, forbid_dtd=False, forbid_entities=True,
forbid_external=True):
ExpatParser.__init__(self, target)
self.forbid_dtd = forbid_dtd
self.forbid_entities = forbid_entities
self.forbid_external = forbid_external
parser = self._parser
if self.forbid_dtd:
parser.StartDoctypeDeclHandler = self.defused_start_doctype_decl
if self.forbid_entities:
parser.EntityDeclHandler = self.defused_entity_decl
parser.UnparsedEntityDeclHandler = self.defused_unparsed_entity_decl
if self.forbid_external:
parser.ExternalEntityRefHandler = self.defused_external_entity_ref_handler
def defused_start_doctype_decl(self, name, sysid, pubid,
has_internal_subset):
raise DTDForbidden(name, sysid, pubid)
def defused_entity_decl(self, name, is_parameter_entity, value, base,
sysid, pubid, notation_name):
raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name)
def defused_unparsed_entity_decl(self, name, base, sysid, pubid,
notation_name):
# expat 1.2
raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name)
def defused_external_entity_ref_handler(self, context, base, sysid,
pubid):
raise ExternalReferenceForbidden(context, base, sysid, pubid)
def monkey_patch():
xmlrpc_client.FastParser = DefusedExpatParser
if PY26 or PY31:
# Python 2.6 and 3.1 have no gzip support in xmlrpc
return
xmlrpc_client.GzipDecodedResponse = DefusedGzipDecodedResponse
xmlrpc_client.gzip_decode = defused_gzip_decode
if xmlrpc_server:
xmlrpc_server.gzip_decode = defused_gzip_decode
def unmonkey_patch():
xmlrpc_client.FastParser = None
if PY26 or PY31:
return
xmlrpc_client.GzipDecodedResponse = _OrigGzipDecodedResponse
xmlrpc_client.gzip_decode = _orig_gzip_decode
if xmlrpc_server:
xmlrpc_server.gzip_decode = _orig_gzip_decode
|
renard/ansible
|
refs/heads/devel
|
lib/ansible/plugins/action/async_status.py
|
4
|
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.utils.vars import merge_hash
class ActionModule(ActionBase):
_VALID_ARGS = frozenset(('jid', 'mode'))
def run(self, tmp=None, task_vars=None):
results = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
if "jid" not in self._task.args:
raise AnsibleError("jid is required")
jid = self._task.args["jid"]
mode = self._task.args.get("mode", "status")
env_async_dir = [e for e in self._task.environment if
"ANSIBLE_ASYNC_DIR" in e]
if len(env_async_dir) > 0:
# for backwards compatibility we need to get the dir from
# ANSIBLE_ASYNC_DIR that is defined in the environment. This is
# deprecated and will be removed in favour of shell options
async_dir = env_async_dir[0]['ANSIBLE_ASYNC_DIR']
msg = "Setting the async dir from the environment keyword " \
"ANSIBLE_ASYNC_DIR is deprecated. Set the async_dir " \
"shell option instead"
self._display.deprecated(msg, "2.12", collection_name='ansible.builtin')
else:
# inject the async directory based on the shell option into the
# module args
async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
module_args = dict(jid=jid, mode=mode, _async_dir=async_dir)
status = self._execute_module(task_vars=task_vars,
module_args=module_args)
results = merge_hash(results, status)
return results
|
ack8006/Python-mode-klen
|
refs/heads/master
|
pymode/libs2/rope/base/prefs.py
|
91
|
class Prefs(object):
def __init__(self):
self.prefs = {}
self.callbacks = {}
def set(self, key, value):
"""Set the value of `key` preference to `value`."""
if key in self.callbacks:
self.callbacks[key](value)
else:
self.prefs[key] = value
def add(self, key, value):
"""Add an entry to a list preference
Add `value` to the list of entries for the `key` preference.
"""
        if key not in self.prefs:
self.prefs[key] = []
self.prefs[key].append(value)
def get(self, key, default=None):
"""Get the value of the key preference"""
return self.prefs.get(key, default)
def add_callback(self, key, callback):
"""Add `key` preference with `callback` function
Whenever `key` is set the callback is called with the
given `value` as parameter.
"""
self.callbacks[key] = callback
def __setitem__(self, key, value):
self.set(key, value)
def __getitem__(self, key):
return self.get(key)
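# Minimal usage sketch (keys are illustrative):
#   prefs = Prefs()
#   prefs['ignore_syntax_errors'] = True   # routed through set()
#   prefs.add('source_folders', 'src')     # list-valued preference
#   prefs.get('source_folders')            # -> ['src']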
|
Sparker0i/fosswebsite
|
refs/heads/master
|
technical_resources/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
kennedyshead/home-assistant
|
refs/heads/dev
|
homeassistant/components/binary_sensor/significant_change.py
|
5
|
"""Helper to test significant Binary Sensor state changes."""
from __future__ import annotations
from typing import Any
from homeassistant.core import HomeAssistant, callback
@callback
def async_check_significant_change(
hass: HomeAssistant,
old_state: str,
old_attrs: dict,
new_state: str,
new_attrs: dict,
**kwargs: Any,
) -> bool | None:
"""Test if state significantly changed."""
if old_state != new_state:
return True
return False
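# Usage sketch (illustrative): only the on/off state matters here, so
#   async_check_significant_change(hass, "on", {}, "off", {})  # -> True
#   async_check_significant_change(hass, "on", {}, "on", {})   # -> False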
|
tjanez/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/nxos/nxos_ip_interface.py
|
11
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: nxos_ip_interface
version_added: "2.1"
short_description: Manages L3 attributes for IPv4 and IPv6 interfaces.
description:
- Manages Layer 3 attributes for IPv4 and IPv6 interfaces.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Interface must already be a L3 port when using this module.
- Logical interfaces (po, loop, svi) must be created first.
- C(mask) must be inserted in decimal format (i.e. 24) for
both IPv6 and IPv4.
- A single interface can have multiple IPv6 configured.
options:
interface:
description:
- Full name of interface, i.e. Ethernet1/1, vlan10.
required: true
addr:
description:
- IPv4 or IPv6 Address.
required: false
default: null
mask:
description:
- Subnet mask for IPv4 or IPv6 Address in decimal format.
required: false
default: null
state:
description:
- Specify desired state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: Ensure ipv4 address is configured on Ethernet1/32
nxos_ip_interface:
interface: Ethernet1/32
transport: nxapi
version: v4
state: present
addr: 20.20.20.20
mask: 24
- name: Ensure ipv6 address is configured on Ethernet1/31
nxos_ip_interface:
interface: Ethernet1/31
transport: cli
version: v6
state: present
addr: '2001::db8:800:200c:cccb'
mask: 64
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"addr": "20.20.20.20", "interface": "ethernet1/32", "mask": "24"}
existing:
description: k/v pairs of existing IP attributes on the interface
type: dict
sample: {"addresses": [{"addr": "11.11.11.11", "mask": 17}],
"interface": "ethernet1/32", "prefix": "11.11.0.0",
"type": "ethernet", "vrf": "default"}
end_state:
description: k/v pairs of IP attributes after module execution
returned: always
type: dict
sample: {"addresses": [{"addr": "20.20.20.20", "mask": 24}],
"interface": "ethernet1/32", "prefix": "20.20.20.0",
"type": "ethernet", "vrf": "default"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["interface ethernet1/32", "ip address 20.20.20.20/24"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
import re
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = run_commands(module, cmds)
return body
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = str(value)
else:
new_dict[new_key] = value
return new_dict
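# Illustrative example: with key_map = {'subnet': 'prefix'} and
# table = {'subnet': '10.0.0.0', 'ignored': 1}, the result is
# {'prefix': '10.0.0.0'}: unmapped keys are dropped and truthy values are
# stringified.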
def get_interface_type(interface):
if interface.upper().startswith('ET'):
return 'ethernet'
elif interface.upper().startswith('VL'):
return 'svi'
elif interface.upper().startswith('LO'):
return 'loopback'
elif interface.upper().startswith('MG'):
return 'management'
elif interface.upper().startswith('MA'):
return 'management'
elif interface.upper().startswith('PO'):
return 'portchannel'
else:
return 'unknown'
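# e.g. get_interface_type('Ethernet1/32') -> 'ethernet',
#      get_interface_type('vlan10')       -> 'svi'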
def is_default(interface, module):
command = 'show run interface {0}'.format(interface)
try:
body = execute_show_command(command, module)[0]
if 'invalid' in body.lower():
return 'DNE'
else:
raw_list = body.split('\n')
if raw_list[-1].startswith('interface'):
return True
else:
return False
    except KeyError:
return 'DNE'
def get_interface_mode(interface, intf_type, module):
command = 'show interface {0}'.format(interface)
mode = 'unknown'
if intf_type in ['ethernet', 'portchannel']:
body = execute_show_command(command, module)[0]
if isinstance(body, str):
if 'invalid interface format' in body.lower():
module.fail_json(msg='Invalid interface name. Please check '
'its format.', interface=interface)
interface_table = body['TABLE_interface']['ROW_interface']
mode = str(interface_table.get('eth_mode', 'layer3'))
if mode == 'access' or mode == 'trunk':
mode = 'layer2'
elif intf_type == 'svi':
mode = 'layer3'
return mode
def send_show_command(interface_name, version, module):
if version == 'v4':
command = 'show ip interface {0}'.format(interface_name)
elif version == 'v6':
command = 'show ipv6 interface {0}'.format(interface_name)
if module.params['transport'] == 'nxapi' and version == 'v6':
body = execute_show_command(command, module,
command_type='cli_show_ascii')
else:
body = execute_show_command(command, module)
return body
def parse_structured_data(body, interface_name, version, module):
address_list = []
interface_key = {
'subnet': 'prefix',
'prefix': 'prefix'
}
try:
interface_table = body[0]['TABLE_intf']['ROW_intf']
try:
vrf_table = body[0]['TABLE_vrf']['ROW_vrf']
vrf = vrf_table['vrf-name-out']
except KeyError:
vrf = None
except (KeyError, AttributeError):
return {}
interface = apply_key_map(interface_key, interface_table)
interface['interface'] = interface_name
interface['type'] = get_interface_type(interface_name)
interface['vrf'] = vrf
if version == 'v4':
address = {}
address['addr'] = interface_table.get('prefix', None)
if address['addr'] is not None:
address['mask'] = str(interface_table.get('masklen', None))
interface['addresses'] = [address]
prefix = "{0}/{1}".format(address['addr'], address['mask'])
address_list.append(prefix)
else:
interface['addresses'] = []
elif version == 'v6':
address_list = interface_table.get('addr', [])
interface['addresses'] = []
if address_list:
if not isinstance(address_list, list):
address_list = [address_list]
for ipv6 in address_list:
address = {}
splitted_address = ipv6.split('/')
address['addr'] = splitted_address[0]
address['mask'] = splitted_address[1]
interface['addresses'].append(address)
else:
interface['addresses'] = []
return interface, address_list
def parse_unstructured_data(body, interface_name, module):
interface = {}
address_list = []
vrf = None
body = body[0]
if "ipv6 is disabled" not in body.lower():
splitted_body = body.split('\n')
# We can have multiple IPv6 on the same interface.
# We need to parse them manually from raw output.
for index in range(0, len(splitted_body) - 1):
if "IPv6 address:" in splitted_body[index]:
first_reference_point = index + 1
elif "IPv6 subnet:" in splitted_body[index]:
last_reference_point = index
prefix_line = splitted_body[last_reference_point]
prefix = prefix_line.split('IPv6 subnet:')[1].strip()
interface['prefix'] = prefix
interface_list_table = splitted_body[
first_reference_point:last_reference_point]
for each_line in interface_list_table:
address = each_line.strip().split(' ')[0]
if address not in address_list:
address_list.append(address)
interface['addresses'] = []
if address_list:
for ipv6 in address_list:
address = {}
splitted_address = ipv6.split('/')
address['addr'] = splitted_address[0]
address['mask'] = splitted_address[1]
interface['addresses'].append(address)
try:
            vrf_regex = r'.*VRF\s+(?P<vrf>\S+).*'
match_vrf = re.match(vrf_regex, body, re.DOTALL)
group_vrf = match_vrf.groupdict()
vrf = group_vrf["vrf"]
except AttributeError:
vrf = None
else:
# IPv6's not been configured on this interface yet.
interface['addresses'] = []
interface['interface'] = interface_name
interface['type'] = get_interface_type(interface_name)
interface['vrf'] = vrf
return interface, address_list
def get_ip_interface(interface_name, version, module):
body = send_show_command(interface_name, version, module)
# nxapi default response doesn't reflect the actual interface state
# when dealing with IPv6. That's why we need to get raw output instead
# and manually parse it.
if module.params['transport'] == 'nxapi' and version == 'v6':
interface, address_list = parse_unstructured_data(
body, interface_name, module)
else:
interface, address_list = parse_structured_data(
body, interface_name, version, module)
return interface, address_list
def get_remove_ip_config_commands(interface, addr, mask, version):
commands = []
commands.append('interface {0}'.format(interface))
if version == 'v4':
commands.append('no ip address')
else:
commands.append('no ipv6 address {0}/{1}'.format(addr, mask))
return commands
def get_config_ip_commands(delta, interface, existing, version):
commands = []
delta = dict(delta)
# loop used in the situation that just an IP address or just a
# mask is changing, not both.
for each in ['addr', 'mask']:
if each not in delta:
delta[each] = existing[each]
if version == 'v4':
command = 'ip address {addr}/{mask}'.format(**delta)
else:
command = 'ipv6 address {addr}/{mask}'.format(**delta)
commands.append(command)
commands.insert(0, 'interface {0}'.format(interface))
return commands
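# Editor's note: a hypothetical illustration (values are assumptions) of the
# delta-merge loop above -- when only the mask changes, the address is carried
# over from 'existing' so a complete command can still be rendered:
#
#   >>> get_config_ip_commands({'mask': '25'}, 'Ethernet1/1',
#   ...                        {'addr': '10.1.1.1', 'mask': '24'}, 'v4')
#   ['interface Ethernet1/1', 'ip address 10.1.1.1/25']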
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
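# Editor's note: a small sketch (made-up commands) of flatten_list -- nested
# per-interface command lists collapse into one flat list for load_config:
#
#   >>> flatten_list([['interface Ethernet1/1', 'no ip address'], 'end'])
#   ['interface Ethernet1/1', 'no ip address', 'end']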
def validate_params(addr, interface, mask, version, state, intf_type, module):
if state == "present":
if addr is None or mask is None:
module.fail_json(msg="An IP address AND a mask must be provided "
"when state=present.")
elif state == "absent" and version == "v6":
if addr is None or mask is None:
module.fail_json(msg="IPv6 address and mask must be provided when "
"state=absent.")
if (intf_type != "ethernet" and module.params["transport"] == "cli"):
if is_default(interface, module) == "DNE":
module.fail_json(msg="That interface does not exist yet. Create "
"it first.", interface=interface)
if mask is not None:
try:
if (int(mask) < 1 or int(mask) > 32) and version == "v4":
raise ValueError
elif int(mask) < 1 or int(mask) > 128:
raise ValueError
except ValueError:
module.fail_json(msg="Warning! 'mask' must be an integer between"
" 1 and 32 when version v4 and up to 128 "
"when version v6.", version=version,
mask=mask)
def main():
argument_spec = dict(
interface=dict(required=True),
addr=dict(required=False),
version=dict(required=False, choices=['v4', 'v6'],
default='v4'),
mask=dict(type='str', required=False),
state=dict(required=False, default='present',
choices=['present', 'absent']),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
addr = module.params['addr']
version = module.params['version']
mask = module.params['mask']
interface = module.params['interface'].lower()
state = module.params['state']
intf_type = get_interface_type(interface)
validate_params(addr, interface, mask, version, state, intf_type, module)
mode = get_interface_mode(interface, intf_type, module)
if mode == 'layer2':
module.fail_json(msg='That interface is a layer2 port.\nMake it '
'a layer 3 port first.', interface=interface)
existing, address_list = get_ip_interface(interface, version, module)
args = dict(addr=addr, mask=mask, interface=interface)
proposed = dict((k, v) for k, v in args.items() if v is not None)
commands = []
changed = False
end_state = existing
if state == 'absent' and existing['addresses']:
if version == 'v6':
for address in existing['addresses']:
if address['addr'] == addr and address['mask'] == mask:
command = get_remove_ip_config_commands(interface, addr,
mask, version)
commands.append(command)
else:
command = get_remove_ip_config_commands(interface, addr,
mask, version)
commands.append(command)
elif state == 'present':
if not existing['addresses']:
command = get_config_ip_commands(proposed, interface,
existing, version)
commands.append(command)
else:
prefix = "{0}/{1}".format(addr, mask)
if prefix not in address_list:
command = get_config_ip_commands(proposed, interface,
existing, version)
commands.append(command)
else:
for address in existing['addresses']:
if (address['addr'] == addr and
int(address['mask']) != int(mask)):
command = get_config_ip_commands(proposed, interface,
existing, version)
commands.append(command)
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
load_config(module, cmds)
changed = True
end_state, address_list = get_ip_interface(interface, version,
module)
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
module.exit_json(**results)
if __name__ == '__main__':
main()
|
team-items/MissionControl-Server
|
refs/heads/master
|
MC2ML/MC2ML_Parser.py
|
1
|
import sys
import os
import xml.etree.ElementTree as ET
MC2MLPath = os.path.dirname(os.path.abspath(__file__))
#direct use
flags = []
includes = []
variables = []
functions = []
dataFields = []
controlFields = []
getters = []
actions = []
inits = []
integers = []
floats = []
bools = []
groups = []
sliders = []
buttons = []
information_dotset_string_lao = 'json_object_dotset_string(root_object_lao, "ConnLAO.Information.'
information_dotset_number_lao = 'json_object_dotset_number(root_object_lao, "ConnLAO.Information.'
control_dotset_string_lao = 'json_object_dotset_string(root_object_lao, "ConnLAO.Controller.'
control_dotset_number_lao = 'json_object_dotset_number(root_object_lao, "ConnLAO.Controller.'
data_dotset_string = 'json_object_dotset_string(root_object, "Data.'
data_dotset_number = 'json_object_dotset_number(root_object, "Data.'
def printChilds(root):
for child in root:
print(child.tag)
printChilds(child)
def grabLibraryElements(root):
for library in root.findall("Libraries"):
for child in library:
if child.tag == "Flag":
flags.append(child.text)
if child.tag == "Include":
includes.append("#include <"+child.text+">")
def grabMonitorElements(root):
for monitor in root.findall("Monitor"):
for Integers in monitor.findall("Integer"):
for intVal in Integers.findall("IntegerValue"):
integers.append(intVal)
for Floats in monitor.findall("Float"):
for floatVal in Floats.findall("FloatValue"):
floats.append(floatVal)
for Bools in monitor.findall("Bool"):
for boolVal in Bools.findall("BoolValue"):
bools.append(boolVal)
def grabSupportElements(root):
for support in root.findall("Support"):
for variable in support.findall("Variable"):
variables.append(variable.text.strip())
for function in support.findall("Function"):
functions.append(function.text.strip().replace("\t", "").strip())
def grabControlElements(root):
for control in root.findall("Control"):
for group in control.findall("Group"):
groups.append(group)
for slider in control.findall("Slider"):
sliders.append(slider)
for button in control.findall("Button"):
buttons.append(button)
def grabInit(root):
for init in root.findall("Init"):
inits.append(init.text.replace("\t", "").replace(" ", "")+"\n")
def grabElements(path):
tree = ET.parse(MC2MLPath+"/controller_files/"+path)
root = tree.getroot()
grabLibraryElements(root)
grabMonitorElements(root)
grabSupportElements(root)
grabControlElements(root)
grabInit(root)
def generateVariables():
for slider in sliders:
variables.append(createSliderVariableDefinition(slider))
for group in groups:
slider = group.find('Slider')
variables.append(createSliderVariableDefinition(slider))
def getSliderName(slider):
return slider.find('Name').text.replace(' ', '_')
def getSliderMinValue(slider):
return slider.find('MinBound').text
def createSliderVariableDefinition(slider):
return "int "+getSliderName(slider)+" = "+getSliderMinValue(slider)+";"
def generateNumberField(field, type):
prefix = field.find('Name').text
minbound = field.find('MinBound')
maxbound = field.find('MaxBound')
graph = field.find('Graph')
if (type == "Float" or type == "Integer") and (minbound == None or maxbound == None):
raise Exception("Min and maxbound required for Float and Integer")
returnVal = information_dotset_string_lao+type+'.'+prefix+'.DataType", "'+type+'");'
if minbound != None:
returnVal += '\n'+information_dotset_number_lao+type+'.'+prefix+'.MinBound", '+minbound.text+');'
if maxbound != None:
returnVal += '\n'+information_dotset_number_lao+type+'.'+prefix+'.MaxBound", '+maxbound.text+');'
if graph != None:
returnVal += '\n'+information_dotset_number_lao+type+'.'+prefix+'.Graph", '+graph.text+');'
return returnVal
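# Editor's note: a hypothetical example (assumed XML shape) of the JSON
# dotset lines generated for an integer field with both bounds set:
#
#   >>> f = ET.fromstring('<IntegerValue><Name>RPM</Name>'
#   ...                   '<MinBound>0</MinBound><MaxBound>100</MaxBound>'
#   ...                   '</IntegerValue>')
#   >>> print(generateNumberField(f, 'Integer'))
#   json_object_dotset_string(root_object_lao, "ConnLAO.Information.Integer.RPM.DataType", "Integer");
#   json_object_dotset_number(root_object_lao, "ConnLAO.Information.Integer.RPM.MinBound", 0);
#   json_object_dotset_number(root_object_lao, "ConnLAO.Information.Integer.RPM.MaxBound", 100);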
def generateIntegerField(integerField):
return generateNumberField(integerField, 'Integer')
def generateFloatField(floatField):
return generateNumberField(floatField, 'Float')
def generateBoolField(field):
return generateNumberField(field, 'Bool')
def generateButtonControl(field, prefixAddition = ""):
prefix = field.find('Name').text
if prefixAddition != "":
prefix = prefixAddition+"."+prefix
descriptor = field.find('Descriptor')
returnVal = control_dotset_string_lao+prefix+'.ControlType", "Button");'
    if descriptor is not None:
returnVal += '\n'+control_dotset_string_lao+prefix+'.Descriptor", "'+descriptor.text+'");'
return returnVal
def generateSliderControl(field, prefixAddition = ""):
prefix = field.find('Name').text
if prefixAddition != "":
prefix = prefixAddition+"."+prefix
minbound = field.find('MinBound')
maxbound = field.find('MaxBound')
    if minbound is None or maxbound is None:
        raise Exception("Min and maxbound required for Slider")
returnVal = control_dotset_string_lao+prefix+'.ControlType", "Slider");'
returnVal += '\n'+control_dotset_number_lao+prefix+'.MinBound", '+minbound.text+');'
returnVal += '\n'+control_dotset_number_lao+prefix+'.MaxBound", '+maxbound.text+');'
return returnVal
def generateGetter(field, type):
returnVal = ""
if type == 'Float' or type == 'Integer':
returnVal = data_dotset_number
else:
returnVal = data_dotset_string
return returnVal+field.find('Name').text+'", '+field.find('Getter').text+');'
def button_dotget(name):
return 'json_object_dotget_string(input, "Control.'+name+'")'
def slider_dotget(name):
return 'json_object_dotget_number(input, "Control.'+name+'")'
def generateButtonAction(field):
name = field.find('Name').text
action = field.find('Action')
result = "if(("+button_dotget(name)+") != NULL){"
    if action is not None and action.text is not None:
result+="\n"+action.text.replace('\t', "")
return result + "\n}"
def generateSliderAction(field):
name = field.find('Name').text
action = field.find('Action')
result = "if(((int)"+slider_dotget(name)+") != 0){"
result += "\n"+getSliderName(field)+" = (int)"+slider_dotget(name)+";"
    if action is not None and action.text is not None:
result+="\n"+action.text.replace('\t', "")
return result + "\n}"
def grabDataFields():
for integerField in integers:
dataFields.append(generateIntegerField(integerField))
for floatField in floats:
dataFields.append(generateFloatField(floatField))
for boolField in bools:
dataFields.append(generateBoolField(boolField))
def grabControlFields():
for button in buttons:
controlFields.append(generateButtonControl(button))
for group in groups:
button = group.find('Button')
controlFields.append(generateButtonControl(button, group.find('Name').text))
for slider in sliders:
controlFields.append(generateSliderControl(slider))
for group in groups:
slider = group.find('Slider')
controlFields.append(generateSliderControl(slider, group.find('Name').text))
def grabGetters():
for field in integers:
getters.append(generateGetter(field, 'Integer'))
for field in floats:
getters.append(generateGetter(field, 'Float'))
for field in bools:
getters.append(generateGetter(field, 'Bool'))
def grabActions():
for slider in sliders:
actions.append(generateSliderAction(slider))
for group in groups:
actions.append(generateSliderAction(group.find('Slider')))
for button in buttons:
actions.append(generateButtonAction(button))
for group in groups:
actions.append(generateButtonAction(group.find('Button')))
def generateString(arr):
result = ""
for str in arr:
result+=str+"\n"
return result
def generateCompileLine():
result = "gcc -o RSAL/RSAL RSAL/RSAL.c"
for flag in flags:
result += " "+flag
return result
########################
if len(sys.argv) > 1:
grabElements(sys.argv[1])
generateVariables()
grabDataFields()
grabControlFields()
grabGetters()
grabActions()
with open(MC2MLPath+'/ControllerTemplate', 'r') as template:
templateString=template.read()
templateString = templateString.replace('<CustomIncludes>', generateString(includes))
templateString = templateString.replace('<Variables>', generateString(variables))
templateString = templateString.replace('<Functions>', generateString(functions))
templateString = templateString.replace('<DataFields>', generateString(dataFields))
templateString = templateString.replace('<ControlFields>', generateString(controlFields))
templateString = templateString.replace('<Getters>', generateString(getters))
templateString = templateString.replace('<Actions>', generateString(actions))
templateString = templateString.replace('<Init>', generateString(inits))
result = open(MC2MLPath+'/../RSAL/CONTROLLER.c','w')
result.write(templateString)
result.close()
if len(sys.argv) > 2 and sys.argv[2] == "DEBUG":
os.system("cd "+MC2MLPath+"/.. && "+generateCompileLine())
else:
os.system("cd "+MC2MLPath+"/.. && "+generateCompileLine()+ " && rm RSAL/CONTROLLER.c")
|
netgroup/wmSDN
|
refs/heads/master
|
controller/other_modules/utill.py
|
21
|
# Copyright 2011,2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
Various utility functions
"""
from __future__ import print_function
import traceback
import struct
import sys
import os
import time
import socket
#FIXME: ugh, why can't I make importing pox.core work here?
import logging
log = logging.getLogger("util")
class DirtyList (list):
#TODO: right now the callback may be called more often than needed
# and it may not be called with good names/parameters.
# All you can really rely on is that it will be called in
# some way if something may have changed.
def __init__ (self, *args, **kw):
list.__init__(self, *args, **kw)
self.dirty = False
self.callback = None
def __setslice__ (self, k, v):
#TODO: actually check for change
self._smudge('__setslice__', k, v)
list.__setslice__(self, k, v)
def __delslice__ (self, k):
#TODO: actually check for change
self._smudge('__delslice__', k, None)
list.__delslice__(self, k)
def append (self, v):
self._smudge('append', None, v)
list.append(self, v)
def extend (self, v):
self._smudge('extend', None, v)
list.extend(self, v)
def insert (self, i, v):
    self._smudge('insert', i, v)
    list.insert(self, i, v)
def pop (self, i=-1):
self._smudge('pop', i, None)
list.pop(self, i)
def remove (self, v):
if v in self:
self._smudge('remove', None, v)
list.remove(self, v)
def reverse (self):
if len(self):
self._smudge('reverse', None, None)
list.reverse(self)
def sort (self, *arg, **kw):
#TODO: check for changes?
self._smudge('sort', None, None)
list.sort(self, *arg, **kw)
def __setitem__ (self, k, v):
if isinstance(k, slice):
#TODO: actually check for change
self._smudge('__setitem__slice',k,v)
elif self[k] != v:
self._smudge('__setitem__',k,v)
list.__setitem__(self, k, v)
def __delitem__ (self, k):
list.__delitem__(self, k)
if isinstance(k, slice):
#TODO: actually check for change
      self._smudge('__delitem__slice', k, None)
else:
self._smudge('__delitem__', k, None)
def _smudge (self, reason, k, v):
if self.callback:
if self.callback(reason, k, v) is not True:
self.dirty = True
else:
self.dirty = True
class DirtyDict (dict):
"""
A dict that tracks whether values have been changed shallowly.
  If you set a callback, it will be called when the value changes, and
  passed three values: the reason (e.g. '__setitem__add',
  '__setitem__modify' or '__delitem__'), the key, and the value.
"""
def __init__ (self, *args, **kw):
dict.__init__(self, *args, **kw)
self.dirty = False
self.callback = None
def _smudge (self, reason, k, v):
if self.callback:
if self.callback(reason, k, v) is not True:
self.dirty = True
else:
self.dirty = True
def __setitem__ (self, k, v):
if k not in self:
self._smudge('__setitem__add',k,v)
elif self[k] != v:
self._smudge('__setitem__modify',k,v)
dict.__setitem__(self, k, v)
def __delitem__ (self, k):
self._smudge('__delitem__', k, None)
dict.__delitem__(self, k)
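# Editor's note: a minimal usage sketch (not part of the original POX source)
# of the DirtyDict callback protocol described in its docstring:
#
#   >>> d = DirtyDict()
#   >>> d.callback = lambda reason, k, v: log.debug("%s %s=%s", reason, k, v)
#   >>> d['x'] = 1   # fires callback('__setitem__add', 'x', 1)
#   >>> d['x'] = 2   # fires callback('__setitem__modify', 'x', 2)
#   >>> del d['x']   # fires callback('__delitem__', 'x', None)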
def set_extend (l, index, item, emptyValue = None):
"""
Adds item to the list l at position index. If index is beyond the end
of the list, it will pad the list out until it's large enough, using
emptyValue for the new entries.
"""
if index >= len(l):
  l += ([emptyValue] * (index - len(l) + 1))
l[index] = item
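# Editor's note: a small sketch of set_extend's padding behavior:
#
#   >>> l = [1, 2]
#   >>> set_extend(l, 4, 'x')
#   >>> l
#   [1, 2, None, None, 'x']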
def str_to_dpid (s):
"""
Convert a DPID in the canonical string form into a long int.
"""
if s.lower().startswith("0x"):
s = s[2:]
s = s.replace("-", "").split("|", 2)
a = int(s[0], 16)
if a > 0xffFFffFFffFF:
b = a >> 48
a &= 0xffFFffFFffFF
else:
b = 0
if len(s) == 2:
b = int(s[1])
return a | (b << 48)
strToDPID = str_to_dpid
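# Editor's note: hedged examples of the canonical-string parsing above; the
# part after '|' is the extra 16 bits stored above the 48-bit MAC-style part:
#
#   >>> str_to_dpid("00-00-00-00-00-05")     # -> 5
#   >>> str_to_dpid("00-00-00-00-00-05|2")   # -> 5 | (2 << 48)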
def dpid_to_str (dpid, alwaysLong = False):
"""
Convert a DPID from a long into into the canonical string form.
"""
if type(dpid) is long or type(dpid) is int:
# Not sure if this is right
dpid = struct.pack('!Q', dpid)
assert len(dpid) == 8
r = '-'.join(['%02x' % (ord(x),) for x in dpid[2:]])
if alwaysLong or dpid[0:2] != (b'\x00'*2):
r += '|' + str(struct.unpack('!H', dpid[0:2])[0])
return r
dpidToStr = dpid_to_str # Deprecated
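# Editor's note: the inverse examples for dpid_to_str:
#
#   >>> dpid_to_str(5)               # -> '00-00-00-00-00-05'
#   >>> dpid_to_str(5 | (2 << 48))   # -> '00-00-00-00-00-05|2'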
def assert_type(name, obj, types, none_ok=True):
"""
Assert that a parameter is of a given type.
Raise an Assertion Error with a descriptive error msg if not.
name: name of the parameter for error messages
obj: parameter value to be checked
types: type or list or tuple of types that is acceptable
none_ok: whether 'None' is an ok value
"""
if obj is None:
if none_ok:
return True
else:
raise AssertionError("%s may not be None" % name)
if not isinstance(types, (tuple, list)):
types = [ types ]
for cls in types:
if isinstance(obj, cls):
return True
allowed_types = "|".join(map(lambda x: str(x), types))
stack = traceback.extract_stack()
stack_msg = "Function call %s() in %s:%d" % (stack[-2][2],
stack[-3][0], stack[-3][1])
type_msg = ("%s must be instance of %s (but is %s)"
% (name, allowed_types , str(type(obj))))
raise AssertionError(stack_msg + ": " + type_msg)
def initHelper (obj, kw):
"""
Inside a class's __init__, this will copy keyword arguments to fields
of the same name. See libopenflow for an example.
"""
for k,v in kw.iteritems():
if not hasattr(obj, k):
raise TypeError(obj.__class__.__name__ + " constructor got "
+ "unexpected keyword argument '" + k + "'")
setattr(obj, k, v)
def makePinger ():
"""
A pinger is basically a thing to let you wake a select().
On Unix systems, this makes a pipe pair. But on Windows, select() only
works with sockets, so it makes a pair of connected sockets.
"""
class PipePinger (object):
def __init__ (self, pair):
self._w = pair[1]
self._r = pair[0]
assert os is not None
def ping (self):
if os is None: return #TODO: Is there a better fix for this?
os.write(self._w, ' ')
def fileno (self):
return self._r
def pongAll (self):
#TODO: make this actually read all
os.read(self._r, 1024)
def pong (self):
os.read(self._r, 1)
def __del__ (self):
try:
os.close(self._w)
except:
pass
try:
os.close(self._r)
except:
pass
def __repr__ (self):
return "<%s %i/%i>" % (self.__class__.__name__, self._w, self._r)
class SocketPinger (object):
def __init__ (self, pair):
self._w = pair[1]
self._r = pair[0]
def ping (self):
self._w.send(' ')
def pong (self):
self._r.recv(1)
def pongAll (self):
#TODO: make this actually read all
self._r.recv(1024)
def fileno (self):
return self._r.fileno()
def __repr__ (self):
return "<%s %s/%s>" % (self.__class__.__name__, self._w, self._r)
#return PipePinger((os.pipe()[0],os.pipe()[1])) # To test failure case
if os.name == "posix":
return PipePinger(os.pipe())
#TODO: clean up sockets?
localaddress = '127.127.127.127'
startPort = 10000
import socket
import select
def tryConnect ():
l = socket.socket()
l.setblocking(0)
port = startPort
while True:
try:
l.bind( (localaddress, port) )
break
except:
port += 1
if port - startPort > 1000:
raise RuntimeError("Could not find a free socket")
l.listen(0)
r = socket.socket()
try:
r.connect((localaddress, port))
except:
import traceback
ei = sys.exc_info()
ei = traceback.format_exception_only(ei[0], ei[1])
ei = ''.join(ei).strip()
log.warning("makePinger: connect exception:\n" + ei)
return False
rlist, wlist,elist = select.select([l], [], [l], 2)
if len(elist):
log.warning("makePinger: socket error in select()")
return False
if len(rlist) == 0:
log.warning("makePinger: socket didn't connect")
return False
try:
w, addr = l.accept()
except:
return False
#w.setblocking(0)
if addr != r.getsockname():
log.info("makePinger: pair didn't connect to each other!")
return False
r.setblocking(1)
# Turn off Nagle
r.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
w.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
return (r, w)
# Try a few times
for i in range(0, 3):
result = tryConnect()
if result is not False:
return SocketPinger(result)
raise RuntimeError("Could not allocate a local socket pair")
def str_to_bool (s):
"""
Given a string, parses out whether it is meant to be True or not
"""
s = str(s).lower() # Make sure
if s in ['true', 't', 'yes', 'y', 'on', 'enable', 'enabled', 'ok',
'okay', '1', 'allow', 'allowed']:
return True
try:
r = 10
if s.startswith("0x"):
s = s[2:]
r = 16
i = int(s, r)
if i != 0:
return True
except:
pass
return False
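# Editor's note: a few illustrative inputs for str_to_bool:
#
#   >>> str_to_bool('Yes')      # True  (in the accept-list)
#   >>> str_to_bool('0x10')     # True  (parses as a non-zero integer)
#   >>> str_to_bool('0')        # False
#   >>> str_to_bool('banana')   # False (unparseable falls through to False)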
def hexdump (data):
if isinstance(data, str):
data = [ord(c) for c in data]
o = ""
def chunks (data, length):
return (data[i:i+length] for i in xrange(0, len(data), length))
def filt (c):
if c >= 32 and c <= 126: return chr(c)
return '.'
for i,chunk in enumerate(chunks(data,16)):
if i > 0: o += "\n"
o += "%04x: " % (i * 16,)
l = ' '.join("%02x" % (c,) for c in chunk)
l = "%-48s" % (l,)
l = l[:3*8-1] + " " + l[3*8:]
t = ''.join([filt(x) for x in chunk])
l += ' %-16s' % (t,)
o += l
return o
def connect_socket_with_backoff (address, port, max_backoff_seconds=32):
'''
Connect to the given address and port. If the connection attempt fails,
exponentially back off, up to the max backoff
return the connected socket, or raise an exception if the connection
was unsuccessful
'''
backoff_seconds = 1
sock = None
print("connect_socket_with_backoff(address=%s, port=%d)"
% (address, port), file=sys.stderr)
while True:
try:
sock = socket.socket()
sock.connect( (address, port) )
break
except socket.error as e:
print("%s. Backing off %d seconds ..." % (str(e), backoff_seconds),
file=sys.stderr)
if backoff_seconds >= max_backoff_seconds:
raise RuntimeError("Could not connect to controller %s:%d"
% (address, port))
else:
time.sleep(backoff_seconds)
backoff_seconds <<= 1
return sock
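# Editor's note: a usage sketch (hypothetical address/port). The delay doubles
# each retry -- 1, 2, 4, ... seconds -- and a RuntimeError is raised once it
# reaches max_backoff_seconds:
#
#   >>> sock = connect_socket_with_backoff('127.0.0.1', 6633)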
_scalar_types = (int, long, basestring, float, bool)
def is_scalar (v):
return isinstance(v, _scalar_types)
def fields_of (obj, primitives_only=False,
primitives_and_composites_only=False, allow_caps=False):
"""
Returns key/value pairs of things that seem like public fields of an object.
"""
#NOTE: The above docstring isn't split into two lines on purpose.
r = {}
for k in dir(obj):
if k.startswith('_'): continue
v = getattr(obj, k)
if hasattr(v, '__call__'): continue
if not allow_caps and k.upper() == k: continue
if primitives_only:
if not isinstance(v, _scalar_types):
continue
elif primitives_and_composites_only:
if not isinstance(v, (int, long, basestring, float, bool, set,
dict, list)):
continue
#r.append((k,v))
r[k] = v
return r
if __name__ == "__main__":
#TODO: move to tests?
def cb (t,k,v): print(v)
l = DirtyList([10,20,30,40,50])
l.callback = cb
l.append(3)
print(l)
|
Endika/odoo
|
refs/heads/8.0
|
doc/conf.py
|
184
|
# -*- coding: utf-8 -*-
import sys, os
import sphinx
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
DIR = os.path.dirname(__file__)
sys.path.append(
os.path.abspath(
os.path.join(DIR, '_extensions')))
# autodoc
sys.path.append(os.path.abspath(os.path.join(DIR, '..')))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.2'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.ifconfig',
'sphinx.ext.todo',
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.linkcode',
'github_link',
'odoo',
'html_domain',
'exercise_admonition',
'patchqueue'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'odoo'
copyright = u'Odoo S.A.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '8.0'
# The full version, including alpha/beta/rc tags.
release = '8.0'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'odoo'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'odoo'
odoo_cover_default = 'banners/installing_odoo.jpg'
odoo_cover_external = {
'https://odoo.com/documentation/functional/accounting.html' : 'banners/m_accounting.jpg',
'https://odoo.com/documentation/functional/double-entry.html' : 'banners/m_1.jpg',
'https://odoo.com/documentation/functional/valuation.html' : 'banners/m_2.jpg',
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_extensions']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_add_permalinks = u''
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# FIXME: no sidebar on index?
html_sidebars = {
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
latex_elements = {
'papersize': r'a4paper',
'preamble': u'''\\setcounter{tocdepth}{2}
''',
}
# default must be set otherwise ifconfig blows up
todo_include_todos = False
intersphinx_mapping = {
'python': ('https://docs.python.org/2/', None),
'werkzeug': ('http://werkzeug.pocoo.org/docs/', None),
'sqlalchemy': ('http://docs.sqlalchemy.org/en/rel_0_9/', None),
'django': ('https://django.readthedocs.org/en/latest/', None),
}
github_user = 'odoo'
github_project = 'odoo'
# monkeypatch PHP lexer to not require <?php
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True)
def setup(app):
app.connect('html-page-context', canonicalize)
app.add_config_value('canonical_root', None, 'env')
app.add_config_value('canonical_branch', 'master', 'env')
app.connect('html-page-context', versionize)
app.add_config_value('versions', '', 'env')
app.connect('html-page-context', analytics)
app.add_config_value('google_analytics_key', '', 'env')
def canonicalize(app, pagename, templatename, context, doctree):
""" Adds a 'canonical' URL for the current document in the rendering
context. Requires the ``canonical_root`` setting being set. The canonical
branch is ``master`` but can be overridden using ``canonical_branch``.
"""
if not app.config.canonical_root:
return
context['canonical'] = _build_url(
app.config.canonical_root, app.config.canonical_branch, pagename)
def versionize(app, pagename, templatename, context, doctree):
""" Adds a version switcher below the menu, requires ``canonical_root``
and ``versions`` (an ordered, space-separated lists of all possible
versions).
"""
if not (app.config.canonical_root and app.config.versions):
return
context['versions'] = [
(vs, _build_url(app.config.canonical_root, vs, pagename))
for vs in app.config.versions.split(',')
if vs != app.config.version
]
def analytics(app, pagename, templatename, context, doctree):
if not app.config.google_analytics_key:
return
context['google_analytics_key'] = app.config.google_analytics_key
def _build_url(root, branch, pagename):
return "{canonical_url}{canonical_branch}/{canonical_page}".format(
canonical_url=root,
canonical_branch=branch,
canonical_page=(pagename + '.html').replace('index.html', '')
.replace('index/', ''),
)
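# Editor's note: a hypothetical illustration (the root URL is an assumption)
# of the canonical URLs produced above; 'index' pages collapse to the
# directory root:
#
#   >>> _build_url('https://example.com/doc/', '8.0', 'reference/orm')
#   'https://example.com/doc/8.0/reference/orm.html'
#   >>> _build_url('https://example.com/doc/', '8.0', 'index')
#   'https://example.com/doc/8.0/'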
|
epssy/hue
|
refs/heads/master
|
apps/beeswax/src/beeswax/forms.py
|
21
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django import forms
from django.utils.translation import ugettext as _, ugettext_lazy as _t
from django.core.validators import MinValueValidator, MaxValueValidator
from django.forms import NumberInput
from desktop.lib.django_forms import simple_formset_factory, DependencyAwareForm
from desktop.lib.django_forms import ChoiceOrOtherField, MultiForm, SubmitButton
from filebrowser.forms import PathField
from beeswax import common
from beeswax.models import SavedQuery
class QueryForm(MultiForm):
def __init__(self):
super(QueryForm, self).__init__(
query=HQLForm,
settings=SettingFormSet,
file_resources=FileResourceFormSet,
functions=FunctionFormSet,
saveform=SaveForm
)
class SaveForm(forms.Form):
"""Used for saving query design."""
name = forms.CharField(required=False,
max_length=64,
initial=SavedQuery.DEFAULT_NEW_DESIGN_NAME,
help_text=_t('Change the name to save as a new design.'))
desc = forms.CharField(required=False, max_length=1024, label=_t("Description"))
save = forms.BooleanField(widget=SubmitButton, required=False)
saveas = forms.BooleanField(widget=SubmitButton, required=False)
def __init__(self, *args, **kwargs):
forms.Form.__init__(self, *args, **kwargs)
self.fields['save'].label = _t('Save')
self.fields['save'].widget.label = _('Save')
self.fields['saveas'].label = _t('Save As')
self.fields['saveas'].widget.label = _('Save As')
def clean_name(self):
return self.cleaned_data.get('name', '').strip()
def clean(self):
cleaned_data = super(SaveForm, self).clean()
if self.errors:
return
save = cleaned_data.get('save')
name = cleaned_data.get('name')
if save and not name:
# Bother with name iff we're saving
raise forms.ValidationError(_('Enter a name.'))
return cleaned_data
def set_data(self, name, desc=''):
"""Set the name and desc programmatically"""
data2 = self.data.copy()
data2[self.add_prefix('name')] = name
data2[self.add_prefix('desc')] = desc
self.data = data2
class SaveResultsDirectoryForm(forms.Form):
"""Used for saving the query result data to hdfs directory"""
target_dir = forms.CharField(label=_t("Directory"),
required=True,
help_text=_t("Path to directory"))
def __init__(self, *args, **kwargs):
self.fs = kwargs.pop('fs', None)
super(SaveResultsDirectoryForm, self).__init__(*args, **kwargs)
def clean_target_dir(self):
if not self.cleaned_data['target_dir'].startswith('/'):
raise forms.ValidationError(_("Target directory should begin with a /"))
elif self.fs.exists(self.cleaned_data['target_dir']):
raise forms.ValidationError(_('Directory already exists.'))
return self.cleaned_data['target_dir']
class SaveResultsFileForm(forms.Form):
"""Used for saving the query result data to hdfs file"""
target_file = forms.CharField(label=_t("File path"),
required=True,
help_text=_t("Path to file"))
overwrite = forms.BooleanField(label=_t('Overwrite'),
required=False,
help_text=_t("Overwrite the selected files"))
def clean_target_file(self):
if not self.cleaned_data['target_file'].startswith('/'):
raise forms.ValidationError("Target file should begin with a /")
return self.cleaned_data['target_file']
class SaveResultsTableForm(forms.Form):
"""Used for saving the query result data to hive table"""
target_table = common.HiveIdentifierField(
label=_t("Table Name"),
required=True,
help_text=_t("Name of the new table")) # Can also contain a DB prefixed table name, e.g. DB_NAME.TABLE_NAME
def __init__(self, *args, **kwargs):
self.db = kwargs.pop('db', None)
self.target_database = kwargs.pop('database', 'default')
super(SaveResultsTableForm, self).__init__(*args, **kwargs)
def clean(self):
cleaned_data = super(SaveResultsTableForm, self).clean()
target_table = cleaned_data.get('target_table')
if target_table:
try:
if self.db is not None:
name_parts = target_table.split(".")
if len(name_parts) == 1:
pass
elif len(name_parts) == 2:
self.target_database, target_table = name_parts
else:
self._errors['target_table'] = self.error_class([_('Invalid table prefix name')])
cleaned_data['target_table'] = target_table # Update table name without the DB prefix
self.db.get_table(self.target_database, target_table)
self._errors['target_table'] = self.error_class([_('Table already exists')])
del cleaned_data['target_table']
except Exception:
pass
return cleaned_data
class HQLForm(forms.Form):
query = forms.CharField(label=_t("Query Editor"),
required=True,
widget=forms.Textarea(attrs={'class': 'beeswax_query'}))
is_parameterized = forms.BooleanField(required=False, initial=True)
email_notify = forms.BooleanField(required=False, initial=False)
type = forms.IntegerField(required=False, initial=0)
database = forms.ChoiceField(required=False,
label='',
choices=(('default', 'default'),),
initial=0,
widget=forms.widgets.Select(attrs={'class': 'input-medium'}))
class FunctionForm(forms.Form):
name = forms.CharField(required=True)
class_name = forms.CharField(required=True)
FunctionFormSet = simple_formset_factory(FunctionForm)
class FileResourceForm(forms.Form):
type = forms.ChoiceField(required=True,
choices=[
("JAR", _("jar")),
("ARCHIVE", _("archive")),
("FILE", ("file")),
], help_text=_t("Resources to upload with your Hive job." +
" Use 'jar' for UDFs. Use 'file' and 'archive' for "
"files to be copied and made locally available during MAP/TRANSFORM. " +
"Paths are on HDFS.")
)
path = forms.CharField(required=True, help_text=_t("Path to file on HDFS."))
FileResourceFormSet = simple_formset_factory(FileResourceForm)
class SettingForm(forms.Form):
# TODO: There are common settings that should be always exposed?
key = forms.CharField()
value = forms.CharField()
SettingFormSet = simple_formset_factory(SettingForm)
# In theory, there are only 256 of these...
TERMINATOR_CHOICES = [ (hive_val, desc) for hive_val, desc, ascii in common.TERMINATORS ]
class CreateTableForm(DependencyAwareForm):
"""
Form used in the create table page
"""
dependencies = []
# Basic Data
name = common.HiveIdentifierField(label=_t("Table Name"), required=True)
comment = forms.CharField(label=_t("Description"), required=False)
# Row Formatting
row_format = forms.ChoiceField(required=True,
choices=common.to_choices([ "Delimited", "SerDe" ]),
initial="Delimited")
# Delimited Row
# These initials are per LazySimpleSerDe.DefaultSeparators
field_terminator = ChoiceOrOtherField(label=_t("Field terminator"), required=False, initial=TERMINATOR_CHOICES[0][0],
choices=TERMINATOR_CHOICES)
collection_terminator = ChoiceOrOtherField(label=_t("Collection terminator"), required=False, initial=TERMINATOR_CHOICES[1][0],
choices=TERMINATOR_CHOICES)
map_key_terminator = ChoiceOrOtherField(label=_t("Map key terminator"), required=False, initial=TERMINATOR_CHOICES[2][0],
choices=TERMINATOR_CHOICES)
dependencies += [
("row_format", "Delimited", "field_terminator"),
("row_format", "Delimited", "collection_terminator"),
("row_format", "Delimited", "map_key_terminator"),
]
# Serde Row
serde_name = forms.CharField(required=False, label=_t("SerDe Name"))
serde_properties = forms.CharField(
required=False,
help_text=_t("Comma-separated list of key-value pairs. E.g. 'p1=v1, p2=v2'"))
dependencies += [
("row_format", "SerDe", "serde_name"),
("row_format", "SerDe", "serde_properties"),
]
# File Format
file_format = forms.ChoiceField(required=False, initial="TextFile",
choices=common.to_choices(["TextFile", "SequenceFile", "InputFormat"]),
widget=forms.RadioSelect)
input_format_class = forms.CharField(required=False, label=_t("InputFormat Class"))
output_format_class = forms.CharField(required=False, label=_t("OutputFormat Class"))
dependencies += [
("file_format", "InputFormat", "input_format_class"),
("file_format", "InputFormat", "output_format_class"),
]
# External?
use_default_location = forms.BooleanField(required=False, initial=True, label=_t("Use default location."))
external_location = forms.CharField(required=False, help_text=_t("Path to HDFS directory or file of table data."))
dependencies += [
("use_default_location", False, "external_location")
]
def clean_field_terminator(self):
return _clean_terminator(self.cleaned_data.get('field_terminator'))
def clean_collection_terminator(self):
return _clean_terminator(self.cleaned_data.get('collection_terminator'))
def clean_map_key_terminator(self):
return _clean_terminator(self.cleaned_data.get('map_key_terminator'))
def clean_name(self):
return _clean_tablename(self.db, self.cleaned_data['name'], self.database)
def _clean_tablename(db, name, database='default'):
try:
table = db.get_table(database, name)
if table.name:
raise forms.ValidationError(_('Table "%(name)s" already exists.') % {'name': name})
except Exception:
return name
def _clean_terminator(val):
if val is not None and val == '':
raise forms.ValidationError(_('Terminator must not be empty.'))
return val
class CreateByImportFileForm(forms.Form):
"""Form for step 1 (specifying file) of the import wizard"""
# Basic Data
name = common.HiveIdentifierField(label=_t("Table Name"), required=True)
comment = forms.CharField(label=_t("Description"), required=False)
# File info
path = PathField(label=_t("Input File"))
do_import = forms.BooleanField(required=False, initial=True,
label=_t("Import data from file"),
help_text=_t("Automatically load this file into the table after creation."))
def __init__(self, *args, **kwargs):
self.db = kwargs.pop('db', None)
super(CreateByImportFileForm, self).__init__(*args, **kwargs)
def clean_name(self):
return _clean_tablename(self.db, self.cleaned_data['name'])
class CreateByImportDelimForm(forms.Form):
"""Form for step 2 (picking delimiter) of the import wizard"""
delimiter = ChoiceOrOtherField(label=_t('Delimiter'), required=False, initial=TERMINATOR_CHOICES[0][0],
choices=TERMINATOR_CHOICES)
file_type = forms.CharField(widget=forms.HiddenInput, required=True)
def clean(self):
# ChoiceOrOtherField doesn't work with required=True
delimiter = self.cleaned_data.get('delimiter')
if delimiter.isdigit():
try:
unichr(int(delimiter))
return int(delimiter)
except ValueError:
raise forms.ValidationError(_('Delimiter value must be smaller than 65533.'))
if not delimiter:
raise forms.ValidationError(_('Delimiter value is required.'))
_clean_terminator(delimiter)
return self.cleaned_data
# Note, struct is not currently supported. (Because it's recursive, for example.)
HIVE_TYPES = \
( "string", "tinyint", "smallint", "int", "bigint", "boolean",
"float", "double", "array", "map", "timestamp", "date",
"char", "varchar")
HIVE_PRIMITIVE_TYPES = \
("string", "tinyint", "smallint", "int", "bigint", "boolean",
"float", "double", "timestamp", "date", "char", "varchar")
class PartitionTypeForm(forms.Form):
column_name = common.HiveIdentifierField(required=True)
column_type = forms.ChoiceField(required=True, choices=common.to_choices(HIVE_PRIMITIVE_TYPES))
class ColumnTypeForm(DependencyAwareForm):
"""
Form used to specify a column during table creation
"""
dependencies = [
("column_type", "array", "array_type"),
("column_type", "map", "map_key_type"),
("column_type", "map", "map_value_type"),
("column_type", "char", "char_length"),
("column_type", "varchar", "varchar_length")
]
column_name = common.HiveIdentifierField(label=_t('Column Name'), required=True)
column_type = forms.ChoiceField(label=_t('Column Type'), required=True,
choices=common.to_choices(HIVE_TYPES))
array_type = forms.ChoiceField(required=False,
choices=common.to_choices(HIVE_PRIMITIVE_TYPES), label=_t("Array Value Type"))
map_key_type = forms.ChoiceField(required=False,
choices=common.to_choices(HIVE_PRIMITIVE_TYPES),
help_text=_t("Specify if column_type is map."))
map_value_type = forms.ChoiceField(required=False,
choices=common.to_choices(HIVE_PRIMITIVE_TYPES),
help_text=_t("Specify if column_type is map."))
char_length = forms.IntegerField(required=False, initial=1,
widget=NumberInput(attrs={'min': 1, 'max': 255}),
validators=[MinValueValidator(1), MaxValueValidator(255)],
help_text=_t("Specify if column_type is char"))
  varchar_length = forms.IntegerField(required=False, initial=1,
                                      widget=NumberInput(attrs={'min': 1, 'max': 65535}),
                                      validators=[MinValueValidator(1), MaxValueValidator(65535)],
                                      help_text=_t("Specify if column_type is varchar"))
ColumnTypeFormSet = simple_formset_factory(ColumnTypeForm, initial=[{}], add_label=_t("Add a column"))
# Default to no partitions
PartitionTypeFormSet = simple_formset_factory(PartitionTypeForm, add_label=_t("Add a partition"))
def _clean_databasename(name):
try:
if name in db.get_databases(): # Will always fail
raise forms.ValidationError(_('Database "%(name)s" already exists.') % {'name': name})
except Exception:
return name
class CreateDatabaseForm(DependencyAwareForm):
"""
Form used in the create database page
"""
dependencies = []
# Basic Data
name = common.HiveIdentifierField(label=_t("Database Name"), required=True)
comment = forms.CharField(label=_t("Description"), required=False)
# External if not true
use_default_location = forms.BooleanField(required=False, initial=True, label=_t("Use default location"))
external_location = forms.CharField(required=False, help_text=_t("Path to HDFS directory or file of database data."))
dependencies += [
("use_default_location", False, "external_location")
]
def clean_name(self):
return _clean_databasename(self.cleaned_data['name'])
|
sposs/DIRAC
|
refs/heads/integration
|
Core/Utilities/ClassAd/__init__.py
|
14
|
############################################################
# $HeadURL$
############################################################
"""
DIRAC.Core.ClassAd package
"""
__RCSID__ = "$Id$"
|
vitan/django
|
refs/heads/master
|
django/conf/locale/sr_Latn/__init__.py
|
12133432
| |
eahneahn/free
|
refs/heads/master
|
lib/python2.7/site-packages/django/contrib/sitemaps/management/__init__.py
|
12133432
| |
catch222/pupy
|
refs/heads/master
|
pupy/packages/windows/x86/psutil/_pssunos.py
|
80
|
#!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Sun OS Solaris platform implementation."""
import errno
import os
import socket
import subprocess
import sys
from collections import namedtuple
from . import _common
from . import _psposix
from . import _psutil_posix as cext_posix
from . import _psutil_sunos as cext
from ._common import isfile_strict, socktype_to_enum, sockfam_to_enum
from ._common import usage_percent
from ._compat import PY3
__extra__all__ = ["CONN_IDLE", "CONN_BOUND"]
PAGE_SIZE = os.sysconf('SC_PAGE_SIZE')
AF_LINK = cext_posix.AF_LINK
CONN_IDLE = "IDLE"
CONN_BOUND = "BOUND"
PROC_STATUSES = {
cext.SSLEEP: _common.STATUS_SLEEPING,
cext.SRUN: _common.STATUS_RUNNING,
cext.SZOMB: _common.STATUS_ZOMBIE,
cext.SSTOP: _common.STATUS_STOPPED,
cext.SIDL: _common.STATUS_IDLE,
cext.SONPROC: _common.STATUS_RUNNING, # same as run
cext.SWAIT: _common.STATUS_WAITING,
}
TCP_STATUSES = {
cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
cext.TCPS_SYN_RCVD: _common.CONN_SYN_RECV,
cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
cext.TCPS_CLOSED: _common.CONN_CLOSE,
cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
cext.TCPS_LISTEN: _common.CONN_LISTEN,
cext.TCPS_CLOSING: _common.CONN_CLOSING,
cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
cext.TCPS_IDLE: CONN_IDLE, # sunos specific
cext.TCPS_BOUND: CONN_BOUND, # sunos specific
}
scputimes = namedtuple('scputimes', ['user', 'system', 'idle', 'iowait'])
svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
pextmem = namedtuple('pextmem', ['rss', 'vms'])
pmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss', 'anon', 'locked'])
pmmap_ext = namedtuple(
'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
# set later from __init__.py
NoSuchProcess = None
ZombieProcess = None
AccessDenied = None
TimeoutExpired = None
# --- functions
disk_io_counters = cext.disk_io_counters
net_io_counters = cext.net_io_counters
disk_usage = _psposix.disk_usage
net_if_addrs = cext_posix.net_if_addrs
def virtual_memory():
# we could have done this with kstat, but imho this is good enough
total = os.sysconf('SC_PHYS_PAGES') * PAGE_SIZE
# note: there's no difference on Solaris
free = avail = os.sysconf('SC_AVPHYS_PAGES') * PAGE_SIZE
used = total - free
percent = usage_percent(used, total, _round=1)
return svmem(total, avail, percent, used, free)
def swap_memory():
sin, sout = cext.swap_mem()
# XXX
# we are supposed to get total/free by doing so:
# http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/
# usr/src/cmd/swap/swap.c
# ...nevertheless I can't manage to obtain the same numbers as 'swap'
# cmdline utility, so let's parse its output (sigh!)
p = subprocess.Popen(['/usr/bin/env', 'PATH=/usr/sbin:/sbin:%s' %
os.environ['PATH'], 'swap', '-l', '-k'],
stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
if PY3:
stdout = stdout.decode(sys.stdout.encoding)
if p.returncode != 0:
raise RuntimeError("'swap -l -k' failed (retcode=%s)" % p.returncode)
lines = stdout.strip().split('\n')[1:]
if not lines:
raise RuntimeError('no swap device(s) configured')
total = free = 0
for line in lines:
line = line.split()
t, f = line[-2:]
t = t.replace('K', '')
f = f.replace('K', '')
total += int(int(t) * 1024)
free += int(int(f) * 1024)
used = total - free
percent = usage_percent(used, total, _round=1)
return _common.sswap(total, used, free, percent,
sin * PAGE_SIZE, sout * PAGE_SIZE)
def pids():
"""Returns a list of PIDs currently running on the system."""
return [int(x) for x in os.listdir('/proc') if x.isdigit()]
def pid_exists(pid):
"""Check for the existence of a unix pid."""
return _psposix.pid_exists(pid)
def cpu_times():
"""Return system-wide CPU times as a named tuple"""
ret = cext.per_cpu_times()
return scputimes(*[sum(x) for x in zip(*ret)])
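# Editor's note: a small illustration (made-up numbers) of the zip/sum above.
# zip(*ret) transposes the per-CPU tuples so each field is summed across CPUs:
#
#   >>> ret = [(1.0, 2.0, 3.0, 4.0), (5.0, 6.0, 7.0, 8.0)]
#   >>> scputimes(*[sum(x) for x in zip(*ret)])
#   scputimes(user=6.0, system=8.0, idle=10.0, iowait=12.0)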
def per_cpu_times():
"""Return system per-CPU times as a list of named tuples"""
ret = cext.per_cpu_times()
return [scputimes(*x) for x in ret]
def cpu_count_logical():
"""Return the number of logical CPUs in the system."""
try:
return os.sysconf("SC_NPROCESSORS_ONLN")
except ValueError:
# mimic os.cpu_count() behavior
return None
def cpu_count_physical():
"""Return the number of physical CPUs in the system."""
return cext.cpu_count_phys()
def boot_time():
"""The system boot time expressed in seconds since the epoch."""
return cext.boot_time()
def users():
"""Return currently connected users as a list of namedtuples."""
retlist = []
rawlist = cext.users()
localhost = (':0.0', ':0')
for item in rawlist:
user, tty, hostname, tstamp, user_process = item
# note: the underlying C function includes entries about
# system boot, run level and others. We might want
# to use them in the future.
if not user_process:
continue
if hostname in localhost:
hostname = 'localhost'
nt = _common.suser(user, tty, hostname, tstamp)
retlist.append(nt)
return retlist
def disk_partitions(all=False):
"""Return system disk partitions."""
# TODO - the filtering logic should be better checked so that
# it tries to reflect 'df' as much as possible
retlist = []
partitions = cext.disk_partitions()
for partition in partitions:
device, mountpoint, fstype, opts = partition
if device == 'none':
device = ''
if not all:
# Differently from, say, Linux, we don't have a list of
# common fs types so the best we can do, AFAIK, is to
# filter by filesystem having a total size > 0.
if not disk_usage(mountpoint).total:
continue
ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
retlist.append(ntuple)
return retlist
def net_connections(kind, _pid=-1):
"""Return socket connections. If pid == -1 return system-wide
connections (as opposed to connections opened by one process only).
Only INET sockets are returned (UNIX are not).
"""
cmap = _common.conn_tmap.copy()
if _pid == -1:
cmap.pop('unix', 0)
if kind not in cmap:
raise ValueError("invalid %r kind argument; choose between %s"
% (kind, ', '.join([repr(x) for x in cmap])))
families, types = _common.conn_tmap[kind]
rawlist = cext.net_connections(_pid, families, types)
ret = set()
for item in rawlist:
fd, fam, type_, laddr, raddr, status, pid = item
if fam not in families:
continue
if type_ not in types:
continue
status = TCP_STATUSES[status]
fam = sockfam_to_enum(fam)
type_ = socktype_to_enum(type_)
if _pid == -1:
nt = _common.sconn(fd, fam, type_, laddr, raddr, status, pid)
else:
nt = _common.pconn(fd, fam, type_, laddr, raddr, status)
ret.add(nt)
return list(ret)
def net_if_stats():
"""Get NIC stats (isup, duplex, speed, mtu)."""
ret = cext.net_if_stats()
for name, items in ret.items():
isup, duplex, speed, mtu = items
if hasattr(_common, 'NicDuplex'):
duplex = _common.NicDuplex(duplex)
ret[name] = _common.snicstats(isup, duplex, speed, mtu)
return ret
def wrap_exceptions(fun):
"""Call callable into a try/except clause and translate ENOENT,
EACCES and EPERM in NoSuchProcess or AccessDenied exceptions.
"""
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except EnvironmentError as err:
# support for private module import
if (NoSuchProcess is None or AccessDenied is None or
ZombieProcess is None):
raise
# ENOENT (no such file or directory) gets raised on open().
# ESRCH (no such process) can get raised on read() if
# process is gone in meantime.
if err.errno in (errno.ENOENT, errno.ESRCH):
if not pid_exists(self.pid):
raise NoSuchProcess(self.pid, self._name)
else:
raise ZombieProcess(self.pid, self._name, self._ppid)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
return wrapper
class Process(object):
"""Wrapper class around underlying C implementation."""
__slots__ = ["pid", "_name", "_ppid"]
def __init__(self, pid):
self.pid = pid
self._name = None
self._ppid = None
@wrap_exceptions
def name(self):
# note: max len == 15
return cext.proc_name_and_args(self.pid)[0]
@wrap_exceptions
def exe(self):
        # Will be guessed later from cmdline but we want to explicitly
# invoke cmdline here in order to get an AccessDenied
# exception if the user has not enough privileges.
self.cmdline()
return ""
@wrap_exceptions
def cmdline(self):
return cext.proc_name_and_args(self.pid)[1].split(' ')
@wrap_exceptions
def create_time(self):
return cext.proc_basic_info(self.pid)[3]
@wrap_exceptions
def num_threads(self):
return cext.proc_basic_info(self.pid)[5]
@wrap_exceptions
def nice_get(self):
# For some reason getpriority(3) return ESRCH (no such process)
# for certain low-pid processes, no matter what (even as root).
# The process actually exists though, as it has a name,
# creation time, etc.
# The best thing we can do here appears to be raising AD.
# Note: tested on Solaris 11; on Open Solaris 5 everything is
# fine.
try:
return cext_posix.getpriority(self.pid)
except EnvironmentError as err:
# 48 is 'operation not supported' but errno does not expose
# it. It occurs for low system pids.
if err.errno in (errno.ENOENT, errno.ESRCH, 48):
if pid_exists(self.pid):
raise AccessDenied(self.pid, self._name)
raise
@wrap_exceptions
def nice_set(self, value):
if self.pid in (2, 3):
            # Special case PIDs: internally setpriority(3) returns ESRCH
            # (no such process), no matter what.
# The process actually exists though, as it has a name,
# creation time, etc.
raise AccessDenied(self.pid, self._name)
return cext_posix.setpriority(self.pid, value)
@wrap_exceptions
def ppid(self):
return cext.proc_basic_info(self.pid)[0]
@wrap_exceptions
def uids(self):
real, effective, saved, _, _, _ = cext.proc_cred(self.pid)
return _common.puids(real, effective, saved)
@wrap_exceptions
def gids(self):
_, _, _, real, effective, saved = cext.proc_cred(self.pid)
        return _common.pgids(real, effective, saved)
@wrap_exceptions
def cpu_times(self):
user, system = cext.proc_cpu_times(self.pid)
return _common.pcputimes(user, system)
@wrap_exceptions
def terminal(self):
hit_enoent = False
        tty = cext.proc_basic_info(self.pid)[0]
if tty != cext.PRNODEV:
for x in (0, 1, 2, 255):
try:
return os.readlink('/proc/%d/path/%d' % (self.pid, x))
except OSError as err:
if err.errno == errno.ENOENT:
hit_enoent = True
continue
raise
if hit_enoent:
# raise NSP if the process disappeared on us
os.stat('/proc/%s' % self.pid)
@wrap_exceptions
def cwd(self):
        # /proc/PID/path/cwd may not be resolved by readlink() even if
        # it exists (ls shows it). If that's the case, and the process
        # is still alive, return None (None is also returned on BSD).
        # Reference: http://goo.gl/55XgO
try:
return os.readlink("/proc/%s/path/cwd" % self.pid)
except OSError as err:
if err.errno == errno.ENOENT:
os.stat("/proc/%s" % self.pid)
return None
raise
@wrap_exceptions
def memory_info(self):
ret = cext.proc_basic_info(self.pid)
rss, vms = ret[1] * 1024, ret[2] * 1024
return _common.pmem(rss, vms)
# it seems Solaris uses rss and vms only
memory_info_ex = memory_info
@wrap_exceptions
def status(self):
code = cext.proc_basic_info(self.pid)[6]
# XXX is '?' legit? (we're not supposed to return it anyway)
return PROC_STATUSES.get(code, '?')
@wrap_exceptions
def threads(self):
ret = []
tids = os.listdir('/proc/%d/lwp' % self.pid)
hit_enoent = False
for tid in tids:
tid = int(tid)
try:
utime, stime = cext.query_process_thread(
self.pid, tid)
except EnvironmentError as err:
                # ENOENT == thread gone in the meantime
if err.errno == errno.ENOENT:
hit_enoent = True
continue
raise
else:
nt = _common.pthread(tid, utime, stime)
ret.append(nt)
if hit_enoent:
# raise NSP if the process disappeared on us
os.stat('/proc/%s' % self.pid)
return ret
@wrap_exceptions
def open_files(self):
retlist = []
hit_enoent = False
pathdir = '/proc/%d/path' % self.pid
for fd in os.listdir('/proc/%d/fd' % self.pid):
path = os.path.join(pathdir, fd)
if os.path.islink(path):
try:
file = os.readlink(path)
except OSError as err:
# ENOENT == file which is gone in the meantime
if err.errno == errno.ENOENT:
hit_enoent = True
continue
raise
else:
if isfile_strict(file):
retlist.append(_common.popenfile(file, int(fd)))
if hit_enoent:
# raise NSP if the process disappeared on us
os.stat('/proc/%s' % self.pid)
return retlist
def _get_unix_sockets(self, pid):
"""Get UNIX sockets used by process by parsing 'pfiles' output."""
# TODO: rewrite this in C (...but the damn netstat source code
# does not include this part! Argh!!)
cmd = "pfiles %s" % pid
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if PY3:
stdout, stderr = [x.decode(sys.stdout.encoding)
for x in (stdout, stderr)]
if p.returncode != 0:
if 'permission denied' in stderr.lower():
raise AccessDenied(self.pid, self._name)
if 'no such process' in stderr.lower():
raise NoSuchProcess(self.pid, self._name)
raise RuntimeError("%r command error\n%s" % (cmd, stderr))
lines = stdout.split('\n')[2:]
for i, line in enumerate(lines):
line = line.lstrip()
if line.startswith('sockname: AF_UNIX'):
path = line.split(' ', 2)[2]
type = lines[i - 2].strip()
if type == 'SOCK_STREAM':
type = socket.SOCK_STREAM
elif type == 'SOCK_DGRAM':
type = socket.SOCK_DGRAM
else:
type = -1
yield (-1, socket.AF_UNIX, type, path, "", _common.CONN_NONE)
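    # For reference, the pfiles output matched above looks roughly like
    # this (hypothetical sample; the exact layout may vary across Solaris
    # releases):
    #
    #   1: S_IFSOCK mode:0666 dev:... ino:... uid:0 gid:0 size:0
    #        SOCK_STREAM
    #        SO_SNDBUF(...),SO_RCVBUF(...)
    #        sockname: AF_UNIX /var/run/example.sock
    #
    # i.e. the socket type sits two lines above the 'sockname:' line,
    # which is why the code indexes lines[i - 2].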
@wrap_exceptions
def connections(self, kind='inet'):
ret = net_connections(kind, _pid=self.pid)
        # The underlying C implementation retrieves all OS connections
        # and filters them by PID. At this point we can't tell whether
        # an empty list means the process has no connections or the
        # process is no longer active, so we force NSP in case the PID
        # is no longer there.
if not ret:
os.stat('/proc/%s' % self.pid) # will raise NSP if process is gone
# UNIX sockets
if kind in ('all', 'unix'):
ret.extend([_common.pconn(*conn) for conn in
self._get_unix_sockets(self.pid)])
return ret
nt_mmap_grouped = namedtuple('mmap', 'path rss anon locked')
nt_mmap_ext = namedtuple('mmap', 'addr perms path rss anon locked')
@wrap_exceptions
def memory_maps(self):
def toaddr(start, end):
return '%s-%s' % (hex(start)[2:].strip('L'),
hex(end)[2:].strip('L'))
retlist = []
rawlist = cext.proc_memory_maps(self.pid)
hit_enoent = False
for item in rawlist:
addr, addrsize, perm, name, rss, anon, locked = item
addr = toaddr(addr, addrsize)
if not name.startswith('['):
try:
name = os.readlink('/proc/%s/path/%s' % (self.pid, name))
except OSError as err:
if err.errno == errno.ENOENT:
                        # Sometimes the link may not be resolved by
                        # readlink() even if it exists (ls shows it).
                        # If that's the case we just return the
                        # unresolved link path.
                        # This seems to be an inconsistency with /proc
                        # similar to: http://goo.gl/55XgO
name = '/proc/%s/path/%s' % (self.pid, name)
hit_enoent = True
else:
raise
retlist.append((addr, perm, name, rss, anon, locked))
if hit_enoent:
# raise NSP if the process disappeared on us
os.stat('/proc/%s' % self.pid)
return retlist
@wrap_exceptions
def num_fds(self):
return len(os.listdir("/proc/%s/fd" % self.pid))
@wrap_exceptions
def num_ctx_switches(self):
return _common.pctxsw(*cext.proc_num_ctx_switches(self.pid))
@wrap_exceptions
def wait(self, timeout=None):
try:
return _psposix.wait_pid(self.pid, timeout)
except _psposix.TimeoutExpired:
# support for private module import
if TimeoutExpired is None:
raise
raise TimeoutExpired(timeout, self.pid, self._name)
|
foreverfaint/scrapy
|
refs/heads/master
|
scrapy/tests/test_contracts.py
|
7
|
from unittest import TextTestRunner
from twisted.trial import unittest
from scrapy.spider import Spider
from scrapy.http import Request
from scrapy.item import Item, Field
from scrapy.contracts import ContractsManager
from scrapy.contracts.default import (
UrlContract,
ReturnsContract,
ScrapesContract,
)
class TestItem(Item):
name = Field()
url = Field()
class ResponseMock(object):
url = 'http://scrapy.org'
class TestSpider(Spider):
name = 'demo_spider'
def returns_request(self, response):
""" method which returns request
@url http://scrapy.org
@returns requests 1
"""
return Request('http://scrapy.org', callback=self.returns_item)
def returns_item(self, response):
""" method which returns item
@url http://scrapy.org
@returns items 1 1
"""
return TestItem(url=response.url)
def returns_fail(self, response):
""" method which returns item
@url http://scrapy.org
@returns items 0 0
"""
return TestItem(url=response.url)
def scrapes_item_ok(self, response):
""" returns item with name and url
@url http://scrapy.org
@returns items 1 1
@scrapes name url
"""
return TestItem(name='test', url=response.url)
def scrapes_item_fail(self, response):
""" returns item with no name
@url http://scrapy.org
@returns items 1 1
@scrapes name url
"""
return TestItem(url=response.url)
def parse_no_url(self, response):
""" method with no url
@returns items 1 1
"""
pass
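# For reference, the annotations in the docstrings above are scrapy's
# default contracts: "@url <url>" sets the URL of the test request,
# "@returns items|requests <min> [<max>]" bounds how many objects the
# callback may return, and "@scrapes <field> ..." requires those fields
# to be present on the returned items.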
class ContractsManagerTest(unittest.TestCase):
contracts = [UrlContract, ReturnsContract, ScrapesContract]
def setUp(self):
self.conman = ContractsManager(self.contracts)
self.results = TextTestRunner()._makeResult()
self.results.stream = None
def should_succeed(self):
self.assertFalse(self.results.failures)
self.assertFalse(self.results.errors)
def should_fail(self):
self.assertTrue(self.results.failures)
self.assertFalse(self.results.errors)
def test_contracts(self):
spider = TestSpider()
# extract contracts correctly
contracts = self.conman.extract_contracts(spider.returns_request)
self.assertEqual(len(contracts), 2)
self.assertEqual(frozenset(type(x) for x in contracts),
frozenset([UrlContract, ReturnsContract]))
# returns request for valid method
request = self.conman.from_method(spider.returns_request, self.results)
self.assertNotEqual(request, None)
# no request for missing url
request = self.conman.from_method(spider.parse_no_url, self.results)
self.assertEqual(request, None)
def test_returns(self):
spider = TestSpider()
response = ResponseMock()
# returns_item
request = self.conman.from_method(spider.returns_item, self.results)
output = request.callback(response)
self.assertEqual([type(x) for x in output], [TestItem])
self.should_succeed()
# returns_request
request = self.conman.from_method(spider.returns_request, self.results)
output = request.callback(response)
self.assertEqual([type(x) for x in output], [Request])
self.should_succeed()
# returns_fail
request = self.conman.from_method(spider.returns_fail, self.results)
request.callback(response)
self.should_fail()
def test_scrapes(self):
spider = TestSpider()
response = ResponseMock()
# scrapes_item_ok
request = self.conman.from_method(spider.scrapes_item_ok, self.results)
output = request.callback(response)
self.assertEqual([type(x) for x in output], [TestItem])
self.should_succeed()
# scrapes_item_fail
request = self.conman.from_method(spider.scrapes_item_fail,
self.results)
request.callback(response)
self.should_fail()
|
ProjectSWGCore/NGECore2
|
refs/heads/master
|
scripts/object/tangible/wearables/armor/rebel_snow/armor_rebel_snow_leggings.py
|
14
|
import sys
def setup(core, object):
object.setStringAttribute('bio_link', '@obj_attr_n:bio_link_pending')
object.setStringAttribute('@set_bonus:piece_bonus_count_3', '@set_bonus:set_bonus_rebel_snow_armor_3')
object.setStringAttribute('@set_bonus:piece_bonus_count_5', '@set_bonus:set_bonus_rebel_snow_armor_5')
object.setStringAttribute('@set_bonus:piece_bonus_count_7', '@set_bonus:set_bonus_rebel_snow_armor_7')
object.setStringAttribute('@set_bonus:piece_bonus_count_9', '@set_bonus:set_bonus_rebel_snow_armor_9')
object.setStringAttribute('@set_bonus:piece_bonus_count_11', '@set_bonus:set_bonus_rebel_snow_armor_11')
object.setAttachment('setBonus', 'set_bonus_rebel_snow_armor')
object.setStringAttribute('armor_category', '@obj_attr_n:armor_battle')
object.setIntAttribute('cat_armor_standard_protection.kinetic', 5400)
object.setIntAttribute('cat_armor_standard_protection.energy', 5400)
object.setIntAttribute('cat_armor_special_protection.special_protection_type_heat', 5400)
object.setIntAttribute('cat_armor_special_protection.special_protection_type_cold', 5400)
object.setIntAttribute('cat_armor_special_protection.special_protection_type_acid', 5400)
object.setIntAttribute('cat_armor_special_protection.special_protection_type_electricity', 5400)
object.setStringAttribute('faction_restriction', 'Rebel')
return
|
fengbaicanhe/intellij-community
|
refs/heads/master
|
python/testData/quickdoc/DirectClass.py
|
83
|
# direct class doc
class Foo(object):
"<the_doc>Doc of Foo."
pass
<the_ref>Foo
|
Isendir/brython
|
refs/heads/master
|
www/src/Lib/test/test_set.py
|
16
|
import unittest
from test import support
import gc
import weakref
import operator
import copy
import pickle
from random import randrange, shuffle
import sys
import warnings
import collections
import collections.abc
class PassThru(Exception):
pass
def check_pass_thru():
raise PassThru
yield 1
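# Note: the yield above makes check_pass_thru() a generator function, so
# PassThru is raised only once the returned generator is iterated. The
# tests pass it to set operations to verify that exceptions raised by a
# consumed iterable propagate instead of being swallowed.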
class BadCmp:
def __hash__(self):
return 1
def __eq__(self, other):
raise RuntimeError
class ReprWrapper:
'Used to test self-referential repr() calls'
def __repr__(self):
return repr(self.value)
class HashCountingInt(int):
'int-like object that counts the number of times __hash__ is called'
def __init__(self, *args):
self.hash_count = 0
def __hash__(self):
self.hash_count += 1
return int.__hash__(self)
class TestJointOps:
# Tests common to both set and frozenset
def setUp(self):
self.word = word = 'simsalabim'
self.otherword = 'madagascar'
self.letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
self.s = self.thetype(word)
self.d = dict.fromkeys(word)
def test_new_or_init(self):
self.assertRaises(TypeError, self.thetype, [], 2)
self.assertRaises(TypeError, set().__init__, a=1)
def test_uniquification(self):
actual = sorted(self.s)
expected = sorted(self.d)
self.assertEqual(actual, expected)
self.assertRaises(PassThru, self.thetype, check_pass_thru())
self.assertRaises(TypeError, self.thetype, [[]])
def test_len(self):
self.assertEqual(len(self.s), len(self.d))
def test_contains(self):
for c in self.letters:
self.assertEqual(c in self.s, c in self.d)
self.assertRaises(TypeError, self.s.__contains__, [[]])
s = self.thetype([frozenset(self.letters)])
self.assertIn(self.thetype(self.letters), s)
def test_union(self):
u = self.s.union(self.otherword)
for c in self.letters:
self.assertEqual(c in u, c in self.d or c in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(u), self.basetype)
self.assertRaises(PassThru, self.s.union, check_pass_thru())
self.assertRaises(TypeError, self.s.union, [[]])
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').union(C('cdc')), set('abcd'))
self.assertEqual(self.thetype('abcba').union(C('efgfe')), set('abcefg'))
self.assertEqual(self.thetype('abcba').union(C('ccb')), set('abc'))
self.assertEqual(self.thetype('abcba').union(C('ef')), set('abcef'))
self.assertEqual(self.thetype('abcba').union(C('ef'), C('fg')), set('abcefg'))
# Issue #6573
x = self.thetype()
self.assertEqual(x.union(set([1]), x, set([2])), self.thetype([1, 2]))
def test_or(self):
i = self.s.union(self.otherword)
self.assertEqual(self.s | set(self.otherword), i)
self.assertEqual(self.s | frozenset(self.otherword), i)
try:
self.s | self.otherword
except TypeError:
pass
else:
self.fail("s|t did not screen-out general iterables")
def test_intersection(self):
i = self.s.intersection(self.otherword)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.basetype)
self.assertRaises(PassThru, self.s.intersection, check_pass_thru())
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').intersection(C('cdc')), set('cc'))
self.assertEqual(self.thetype('abcba').intersection(C('efgfe')), set(''))
self.assertEqual(self.thetype('abcba').intersection(C('ccb')), set('bc'))
self.assertEqual(self.thetype('abcba').intersection(C('ef')), set(''))
self.assertEqual(self.thetype('abcba').intersection(C('cbcf'), C('bag')), set('b'))
s = self.thetype('abcba')
z = s.intersection()
        if self.thetype == frozenset:
self.assertEqual(id(s), id(z))
else:
self.assertNotEqual(id(s), id(z))
def test_isdisjoint(self):
def f(s1, s2):
'Pure python equivalent of isdisjoint()'
return not set(s1).intersection(s2)
for larg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef':
s1 = self.thetype(larg)
for rarg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef':
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s2 = C(rarg)
actual = s1.isdisjoint(s2)
expected = f(s1, s2)
self.assertEqual(actual, expected)
self.assertTrue(actual is True or actual is False)
def test_and(self):
i = self.s.intersection(self.otherword)
self.assertEqual(self.s & set(self.otherword), i)
self.assertEqual(self.s & frozenset(self.otherword), i)
try:
self.s & self.otherword
except TypeError:
pass
else:
self.fail("s&t did not screen-out general iterables")
def test_difference(self):
i = self.s.difference(self.otherword)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c not in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.basetype)
self.assertRaises(PassThru, self.s.difference, check_pass_thru())
self.assertRaises(TypeError, self.s.difference, [[]])
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').difference(C('cdc')), set('ab'))
self.assertEqual(self.thetype('abcba').difference(C('efgfe')), set('abc'))
self.assertEqual(self.thetype('abcba').difference(C('ccb')), set('a'))
self.assertEqual(self.thetype('abcba').difference(C('ef')), set('abc'))
self.assertEqual(self.thetype('abcba').difference(), set('abc'))
self.assertEqual(self.thetype('abcba').difference(C('a'), C('b')), set('c'))
def test_sub(self):
i = self.s.difference(self.otherword)
self.assertEqual(self.s - set(self.otherword), i)
self.assertEqual(self.s - frozenset(self.otherword), i)
try:
self.s - self.otherword
except TypeError:
pass
else:
self.fail("s-t did not screen-out general iterables")
def test_symmetric_difference(self):
i = self.s.symmetric_difference(self.otherword)
for c in self.letters:
self.assertEqual(c in i, (c in self.d) ^ (c in self.otherword))
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.basetype)
self.assertRaises(PassThru, self.s.symmetric_difference, check_pass_thru())
self.assertRaises(TypeError, self.s.symmetric_difference, [[]])
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').symmetric_difference(C('cdc')), set('abd'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('efgfe')), set('abcefg'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('ccb')), set('a'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('ef')), set('abcef'))
def test_xor(self):
i = self.s.symmetric_difference(self.otherword)
self.assertEqual(self.s ^ set(self.otherword), i)
self.assertEqual(self.s ^ frozenset(self.otherword), i)
try:
self.s ^ self.otherword
except TypeError:
pass
else:
self.fail("s^t did not screen-out general iterables")
def test_equality(self):
self.assertEqual(self.s, set(self.word))
self.assertEqual(self.s, frozenset(self.word))
self.assertEqual(self.s == self.word, False)
self.assertNotEqual(self.s, set(self.otherword))
self.assertNotEqual(self.s, frozenset(self.otherword))
self.assertEqual(self.s != self.word, True)
def test_setOfFrozensets(self):
t = map(frozenset, ['abcdef', 'bcd', 'bdcb', 'fed', 'fedccba'])
s = self.thetype(t)
self.assertEqual(len(s), 3)
def test_sub_and_super(self):
p, q, r = map(self.thetype, ['ab', 'abcde', 'def'])
self.assertTrue(p < q)
self.assertTrue(p <= q)
self.assertTrue(q <= q)
self.assertTrue(q > p)
self.assertTrue(q >= p)
self.assertFalse(q < r)
self.assertFalse(q <= r)
self.assertFalse(q > r)
self.assertFalse(q >= r)
self.assertTrue(set('a').issubset('abc'))
self.assertTrue(set('abc').issuperset('a'))
self.assertFalse(set('a').issubset('cbs'))
self.assertFalse(set('cbs').issuperset('a'))
def test_pickling(self):
for i in range(pickle.HIGHEST_PROTOCOL + 1):
p = pickle.dumps(self.s, i)
dup = pickle.loads(p)
self.assertEqual(self.s, dup, "%s != %s" % (self.s, dup))
if type(self.s) not in (set, frozenset):
self.s.x = 10
p = pickle.dumps(self.s)
dup = pickle.loads(p)
self.assertEqual(self.s.x, dup.x)
def test_iterator_pickling(self):
itorg = iter(self.s)
data = self.thetype(self.s)
d = pickle.dumps(itorg)
it = pickle.loads(d)
# Set iterators unpickle as list iterators due to the
# undefined order of set items.
# self.assertEqual(type(itorg), type(it))
self.assertTrue(isinstance(it, collections.abc.Iterator))
self.assertEqual(self.thetype(it), data)
it = pickle.loads(d)
try:
drop = next(it)
except StopIteration:
return
d = pickle.dumps(it)
it = pickle.loads(d)
self.assertEqual(self.thetype(it), data - self.thetype((drop,)))
def test_deepcopy(self):
class Tracer:
def __init__(self, value):
self.value = value
def __hash__(self):
return self.value
def __deepcopy__(self, memo=None):
return Tracer(self.value + 1)
t = Tracer(10)
s = self.thetype([t])
dup = copy.deepcopy(s)
self.assertNotEqual(id(s), id(dup))
for elem in dup:
newt = elem
self.assertNotEqual(id(t), id(newt))
self.assertEqual(t.value + 1, newt.value)
def test_gc(self):
# Create a nest of cycles to exercise overall ref count check
class A:
pass
s = set(A() for i in range(1000))
for elem in s:
elem.cycle = s
elem.sub = elem
elem.set = set([elem])
def test_subclass_with_custom_hash(self):
# Bug #1257731
class H(self.thetype):
def __hash__(self):
return int(id(self) & 0x7fffffff)
s=H()
f=set()
f.add(s)
self.assertIn(s, f)
f.remove(s)
f.add(s)
f.discard(s)
def test_badcmp(self):
s = self.thetype([BadCmp()])
# Detect comparison errors during insertion and lookup
self.assertRaises(RuntimeError, self.thetype, [BadCmp(), BadCmp()])
self.assertRaises(RuntimeError, s.__contains__, BadCmp())
# Detect errors during mutating operations
if hasattr(s, 'add'):
self.assertRaises(RuntimeError, s.add, BadCmp())
self.assertRaises(RuntimeError, s.discard, BadCmp())
self.assertRaises(RuntimeError, s.remove, BadCmp())
def test_cyclical_repr(self):
w = ReprWrapper()
s = self.thetype([w])
w.value = s
if self.thetype == set:
self.assertEqual(repr(s), '{set(...)}')
else:
name = repr(s).partition('(')[0] # strip class name
self.assertEqual(repr(s), '%s({%s(...)})' % (name, name))
def test_cyclical_print(self):
w = ReprWrapper()
s = self.thetype([w])
w.value = s
fo = open(support.TESTFN, "w")
try:
fo.write(str(s))
fo.close()
fo = open(support.TESTFN, "r")
self.assertEqual(fo.read(), repr(s))
finally:
fo.close()
support.unlink(support.TESTFN)
def test_do_not_rehash_dict_keys(self):
n = 10
d = dict.fromkeys(map(HashCountingInt, range(n)))
self.assertEqual(sum(elem.hash_count for elem in d), n)
s = self.thetype(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
s.difference(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
if hasattr(s, 'symmetric_difference_update'):
s.symmetric_difference_update(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
d2 = dict.fromkeys(set(d))
self.assertEqual(sum(elem.hash_count for elem in d), n)
d3 = dict.fromkeys(frozenset(d))
self.assertEqual(sum(elem.hash_count for elem in d), n)
d3 = dict.fromkeys(frozenset(d), 123)
self.assertEqual(sum(elem.hash_count for elem in d), n)
self.assertEqual(d3, dict.fromkeys(d, 123))
def test_container_iterator(self):
# Bug #3680: tp_traverse was not implemented for set iterator object
class C(object):
pass
obj = C()
ref = weakref.ref(obj)
container = set([obj, 1])
obj.x = iter(container)
del obj, container
gc.collect()
self.assertTrue(ref() is None, "Cycle was not collected")
class TestSet(TestJointOps, unittest.TestCase):
thetype = set
basetype = set
def test_init(self):
s = self.thetype()
s.__init__(self.word)
self.assertEqual(s, set(self.word))
s.__init__(self.otherword)
self.assertEqual(s, set(self.otherword))
        self.assertRaises(TypeError, s.__init__, s, 2)
        self.assertRaises(TypeError, s.__init__, 1)
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertNotEqual(id(s), id(t))
def test_set_literal(self):
s = set([1,2,3])
t = {1,2,3}
self.assertEqual(s, t)
def test_hash(self):
self.assertRaises(TypeError, hash, self.s)
def test_clear(self):
self.s.clear()
self.assertEqual(self.s, set())
self.assertEqual(len(self.s), 0)
def test_copy(self):
dup = self.s.copy()
self.assertEqual(self.s, dup)
self.assertNotEqual(id(self.s), id(dup))
self.assertEqual(type(dup), self.basetype)
def test_add(self):
self.s.add('Q')
self.assertIn('Q', self.s)
dup = self.s.copy()
self.s.add('Q')
self.assertEqual(self.s, dup)
self.assertRaises(TypeError, self.s.add, [])
def test_remove(self):
self.s.remove('a')
self.assertNotIn('a', self.s)
self.assertRaises(KeyError, self.s.remove, 'Q')
self.assertRaises(TypeError, self.s.remove, [])
s = self.thetype([frozenset(self.word)])
self.assertIn(self.thetype(self.word), s)
s.remove(self.thetype(self.word))
self.assertNotIn(self.thetype(self.word), s)
self.assertRaises(KeyError, self.s.remove, self.thetype(self.word))
def test_remove_keyerror_unpacking(self):
# bug: www.python.org/sf/1576657
for v1 in ['Q', (1,)]:
try:
self.s.remove(v1)
except KeyError as e:
v2 = e.args[0]
self.assertEqual(v1, v2)
else:
self.fail()
def test_remove_keyerror_set(self):
key = self.thetype([3, 4])
try:
self.s.remove(key)
except KeyError as e:
self.assertTrue(e.args[0] is key,
"KeyError should be {0}, not {1}".format(key,
e.args[0]))
else:
self.fail()
def test_discard(self):
self.s.discard('a')
self.assertNotIn('a', self.s)
self.s.discard('Q')
self.assertRaises(TypeError, self.s.discard, [])
s = self.thetype([frozenset(self.word)])
self.assertIn(self.thetype(self.word), s)
s.discard(self.thetype(self.word))
self.assertNotIn(self.thetype(self.word), s)
s.discard(self.thetype(self.word))
def test_pop(self):
for i in range(len(self.s)):
elem = self.s.pop()
self.assertNotIn(elem, self.s)
self.assertRaises(KeyError, self.s.pop)
def test_update(self):
retval = self.s.update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
self.assertIn(c, self.s)
self.assertRaises(PassThru, self.s.update, check_pass_thru())
self.assertRaises(TypeError, self.s.update, [[]])
for p, q in (('cdc', 'abcd'), ('efgfe', 'abcefg'), ('ccb', 'abc'), ('ef', 'abcef')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.update(C(p)), None)
self.assertEqual(s, set(q))
for p in ('cdc', 'efgfe', 'ccb', 'ef', 'abcda'):
q = 'ahi'
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.update(C(p), C(q)), None)
self.assertEqual(s, set(s) | set(p) | set(q))
def test_ior(self):
self.s |= set(self.otherword)
for c in (self.word + self.otherword):
self.assertIn(c, self.s)
def test_intersection_update(self):
retval = self.s.intersection_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if c in self.otherword and c in self.word:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.intersection_update, check_pass_thru())
self.assertRaises(TypeError, self.s.intersection_update, [[]])
for p, q in (('cdc', 'c'), ('efgfe', ''), ('ccb', 'bc'), ('ef', '')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.intersection_update(C(p)), None)
self.assertEqual(s, set(q))
ss = 'abcba'
s = self.thetype(ss)
t = 'cbc'
self.assertEqual(s.intersection_update(C(p), C(t)), None)
self.assertEqual(s, set('abcba')&set(p)&set(t))
def test_iand(self):
self.s &= set(self.otherword)
for c in (self.word + self.otherword):
if c in self.otherword and c in self.word:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_difference_update(self):
retval = self.s.difference_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if c in self.word and c not in self.otherword:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.difference_update, check_pass_thru())
self.assertRaises(TypeError, self.s.difference_update, [[]])
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
for p, q in (('cdc', 'ab'), ('efgfe', 'abc'), ('ccb', 'a'), ('ef', 'abc')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.difference_update(C(p)), None)
self.assertEqual(s, set(q))
s = self.thetype('abcdefghih')
s.difference_update()
self.assertEqual(s, self.thetype('abcdefghih'))
s = self.thetype('abcdefghih')
s.difference_update(C('aba'))
self.assertEqual(s, self.thetype('cdefghih'))
s = self.thetype('abcdefghih')
s.difference_update(C('cdc'), C('aba'))
self.assertEqual(s, self.thetype('efghih'))
def test_isub(self):
self.s -= set(self.otherword)
for c in (self.word + self.otherword):
if c in self.word and c not in self.otherword:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_symmetric_difference_update(self):
retval = self.s.symmetric_difference_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if (c in self.word) ^ (c in self.otherword):
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.symmetric_difference_update, check_pass_thru())
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
for p, q in (('cdc', 'abd'), ('efgfe', 'abcefg'), ('ccb', 'a'), ('ef', 'abcef')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.symmetric_difference_update(C(p)), None)
self.assertEqual(s, set(q))
def test_ixor(self):
self.s ^= set(self.otherword)
for c in (self.word + self.otherword):
if (c in self.word) ^ (c in self.otherword):
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_inplace_on_self(self):
t = self.s.copy()
t |= t
self.assertEqual(t, self.s)
t &= t
self.assertEqual(t, self.s)
t -= t
self.assertEqual(t, self.thetype())
t = self.s.copy()
t ^= t
self.assertEqual(t, self.thetype())
def test_weakref(self):
s = self.thetype('gallahad')
p = weakref.proxy(s)
self.assertEqual(str(p), str(s))
s = None
self.assertRaises(ReferenceError, str, p)
def test_rich_compare(self):
class TestRichSetCompare:
def __gt__(self, some_set):
self.gt_called = True
return False
def __lt__(self, some_set):
self.lt_called = True
return False
def __ge__(self, some_set):
self.ge_called = True
return False
def __le__(self, some_set):
self.le_called = True
return False
# This first tries the builtin rich set comparison, which doesn't know
# how to handle the custom object. Upon returning NotImplemented, the
# corresponding comparison on the right object is invoked.
myset = {1, 2, 3}
myobj = TestRichSetCompare()
myset < myobj
self.assertTrue(myobj.gt_called)
myobj = TestRichSetCompare()
myset > myobj
self.assertTrue(myobj.lt_called)
myobj = TestRichSetCompare()
myset <= myobj
self.assertTrue(myobj.ge_called)
myobj = TestRichSetCompare()
myset >= myobj
self.assertTrue(myobj.le_called)
# C API test only available in a debug build
if hasattr(set, "test_c_api"):
def test_c_api(self):
self.assertEqual(set().test_c_api(), True)
class SetSubclass(set):
pass
class TestSetSubclass(TestSet):
thetype = SetSubclass
basetype = set
class SetSubclassWithKeywordArgs(set):
def __init__(self, iterable=[], newarg=None):
set.__init__(self, iterable)
class TestSetSubclassWithKeywordArgs(TestSet):
def test_keywords_in_subclass(self):
'SF bug #1486663 -- this used to erroneously raise a TypeError'
SetSubclassWithKeywordArgs(newarg=1)
class TestFrozenSet(TestJointOps, unittest.TestCase):
thetype = frozenset
basetype = frozenset
def test_init(self):
s = self.thetype(self.word)
s.__init__(self.otherword)
self.assertEqual(s, set(self.word))
def test_singleton_empty_frozenset(self):
f = frozenset()
efs = [frozenset(), frozenset([]), frozenset(()), frozenset(''),
frozenset(), frozenset([]), frozenset(()), frozenset(''),
frozenset(range(0)), frozenset(frozenset()),
frozenset(f), f]
# All of the empty frozensets should have just one id()
self.assertEqual(len(set(map(id, efs))), 1)
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertEqual(id(s), id(t))
def test_hash(self):
self.assertEqual(hash(self.thetype('abcdeb')),
hash(self.thetype('ebecda')))
# make sure that all permutations give the same hash value
n = 100
seq = [randrange(n) for i in range(n)]
results = set()
for i in range(200):
shuffle(seq)
results.add(hash(self.thetype(seq)))
self.assertEqual(len(results), 1)
def test_copy(self):
dup = self.s.copy()
self.assertEqual(id(self.s), id(dup))
def test_frozen_as_dictkey(self):
seq = list(range(10)) + list('abcdefg') + ['apple']
key1 = self.thetype(seq)
key2 = self.thetype(reversed(seq))
self.assertEqual(key1, key2)
self.assertNotEqual(id(key1), id(key2))
d = {}
d[key1] = 42
self.assertEqual(d[key2], 42)
def test_hash_caching(self):
f = self.thetype('abcdcda')
self.assertEqual(hash(f), hash(f))
def test_hash_effectiveness(self):
n = 13
hashvalues = set()
addhashvalue = hashvalues.add
elemmasks = [(i+1, 1<<i) for i in range(n)]
for i in range(2**n):
addhashvalue(hash(frozenset([e for e, m in elemmasks if m&i])))
self.assertEqual(len(hashvalues), 2**n)
class FrozenSetSubclass(frozenset):
pass
class TestFrozenSetSubclass(TestFrozenSet):
thetype = FrozenSetSubclass
basetype = frozenset
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertNotEqual(id(s), id(t))
def test_copy(self):
dup = self.s.copy()
self.assertNotEqual(id(self.s), id(dup))
def test_nested_empty_constructor(self):
s = self.thetype()
t = self.thetype(s)
self.assertEqual(s, t)
def test_singleton_empty_frozenset(self):
Frozenset = self.thetype
f = frozenset()
F = Frozenset()
efs = [Frozenset(), Frozenset([]), Frozenset(()), Frozenset(''),
Frozenset(), Frozenset([]), Frozenset(()), Frozenset(''),
Frozenset(range(0)), Frozenset(Frozenset()),
Frozenset(frozenset()), f, F, Frozenset(f), Frozenset(F)]
# All empty frozenset subclass instances should have different ids
self.assertEqual(len(set(map(id, efs))), len(efs))
# Tests taken from test_sets.py =============================================
empty_set = set()
#==============================================================================
class TestBasicOps:
def test_repr(self):
if self.repr is not None:
self.assertEqual(repr(self.set), self.repr)
def check_repr_against_values(self):
text = repr(self.set)
self.assertTrue(text.startswith('{'))
self.assertTrue(text.endswith('}'))
result = text[1:-1].split(', ')
result.sort()
sorted_repr_values = [repr(value) for value in self.values]
sorted_repr_values.sort()
self.assertEqual(result, sorted_repr_values)
def test_print(self):
try:
fo = open(support.TESTFN, "w")
fo.write(str(self.set))
fo.close()
fo = open(support.TESTFN, "r")
self.assertEqual(fo.read(), repr(self.set))
finally:
fo.close()
support.unlink(support.TESTFN)
def test_length(self):
self.assertEqual(len(self.set), self.length)
def test_self_equality(self):
self.assertEqual(self.set, self.set)
def test_equivalent_equality(self):
self.assertEqual(self.set, self.dup)
def test_copy(self):
self.assertEqual(self.set.copy(), self.dup)
def test_self_union(self):
result = self.set | self.set
self.assertEqual(result, self.dup)
def test_empty_union(self):
result = self.set | empty_set
self.assertEqual(result, self.dup)
def test_union_empty(self):
result = empty_set | self.set
self.assertEqual(result, self.dup)
def test_self_intersection(self):
result = self.set & self.set
self.assertEqual(result, self.dup)
def test_empty_intersection(self):
result = self.set & empty_set
self.assertEqual(result, empty_set)
def test_intersection_empty(self):
result = empty_set & self.set
self.assertEqual(result, empty_set)
def test_self_isdisjoint(self):
result = self.set.isdisjoint(self.set)
self.assertEqual(result, not self.set)
def test_empty_isdisjoint(self):
result = self.set.isdisjoint(empty_set)
self.assertEqual(result, True)
def test_isdisjoint_empty(self):
result = empty_set.isdisjoint(self.set)
self.assertEqual(result, True)
def test_self_symmetric_difference(self):
result = self.set ^ self.set
self.assertEqual(result, empty_set)
def test_empty_symmetric_difference(self):
result = self.set ^ empty_set
self.assertEqual(result, self.set)
def test_self_difference(self):
result = self.set - self.set
self.assertEqual(result, empty_set)
def test_empty_difference(self):
result = self.set - empty_set
self.assertEqual(result, self.dup)
def test_empty_difference_rev(self):
result = empty_set - self.set
self.assertEqual(result, empty_set)
def test_iteration(self):
for v in self.set:
self.assertIn(v, self.values)
setiter = iter(self.set)
# note: __length_hint__ is an internal undocumented API,
# don't rely on it in your own programs
self.assertEqual(setiter.__length_hint__(), len(self.set))
def test_pickling(self):
p = pickle.dumps(self.set)
copy = pickle.loads(p)
self.assertEqual(self.set, copy,
"%s != %s" % (self.set, copy))
#------------------------------------------------------------------------------
class TestBasicOpsEmpty(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "empty set"
self.values = []
self.set = set(self.values)
self.dup = set(self.values)
self.length = 0
self.repr = "set()"
#------------------------------------------------------------------------------
class TestBasicOpsSingleton(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "unit set (number)"
self.values = [3]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 1
self.repr = "{3}"
def test_in(self):
self.assertIn(3, self.set)
def test_not_in(self):
self.assertNotIn(2, self.set)
#------------------------------------------------------------------------------
class TestBasicOpsTuple(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "unit set (tuple)"
self.values = [(0, "zero")]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 1
self.repr = "{(0, 'zero')}"
def test_in(self):
self.assertIn((0, "zero"), self.set)
def test_not_in(self):
self.assertNotIn(9, self.set)
#------------------------------------------------------------------------------
class TestBasicOpsTriple(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "triple set"
self.values = [0, "zero", operator.add]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
self.repr = None
#------------------------------------------------------------------------------
class TestBasicOpsString(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "string set"
self.values = ["a", "b", "c"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
def test_repr(self):
self.check_repr_against_values()
#------------------------------------------------------------------------------
class TestBasicOpsBytes(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "string set"
self.values = [b"a", b"b", b"c"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
def test_repr(self):
self.check_repr_against_values()
#------------------------------------------------------------------------------
class TestBasicOpsMixedStringBytes(TestBasicOps, unittest.TestCase):
def setUp(self):
self._warning_filters = support.check_warnings()
self._warning_filters.__enter__()
warnings.simplefilter('ignore', BytesWarning)
self.case = "string and bytes set"
self.values = ["a", "b", b"a", b"b"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 4
def tearDown(self):
self._warning_filters.__exit__(None, None, None)
def test_repr(self):
self.check_repr_against_values()
#==============================================================================
def baditer():
raise TypeError
yield True
def gooditer():
yield True
class TestExceptionPropagation(unittest.TestCase):
"""SF 628246: Set constructor should not trap iterator TypeErrors"""
def test_instanceWithException(self):
self.assertRaises(TypeError, set, baditer())
def test_instancesWithoutException(self):
# All of these iterables should load without exception.
set([1,2,3])
set((1,2,3))
set({'one':1, 'two':2, 'three':3})
set(range(3))
set('abc')
set(gooditer())
def test_changingSizeWhileIterating(self):
s = set([1,2,3])
try:
for i in s:
s.update([4])
except RuntimeError:
pass
else:
self.fail("no exception when changing size during iteration")
#==============================================================================
class TestSetOfSets(unittest.TestCase):
def test_constructor(self):
inner = frozenset([1])
outer = set([inner])
element = outer.pop()
self.assertEqual(type(element), frozenset)
outer.add(inner) # Rebuild set of sets with .add method
outer.remove(inner)
self.assertEqual(outer, set()) # Verify that remove worked
outer.discard(inner) # Absence of KeyError indicates working fine
#==============================================================================
class TestBinaryOps(unittest.TestCase):
def setUp(self):
self.set = set((2, 4, 6))
def test_eq(self): # SF bug 643115
self.assertEqual(self.set, set({2:1,4:3,6:5}))
def test_union_subset(self):
result = self.set | set([2])
self.assertEqual(result, set((2, 4, 6)))
def test_union_superset(self):
result = self.set | set([2, 4, 6, 8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_union_overlap(self):
result = self.set | set([3, 4, 5])
self.assertEqual(result, set([2, 3, 4, 5, 6]))
def test_union_non_overlap(self):
result = self.set | set([8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_intersection_subset(self):
result = self.set & set((2, 4))
self.assertEqual(result, set((2, 4)))
def test_intersection_superset(self):
result = self.set & set([2, 4, 6, 8])
self.assertEqual(result, set([2, 4, 6]))
def test_intersection_overlap(self):
result = self.set & set([3, 4, 5])
self.assertEqual(result, set([4]))
def test_intersection_non_overlap(self):
result = self.set & set([8])
self.assertEqual(result, empty_set)
def test_isdisjoint_subset(self):
result = self.set.isdisjoint(set((2, 4)))
self.assertEqual(result, False)
def test_isdisjoint_superset(self):
result = self.set.isdisjoint(set([2, 4, 6, 8]))
self.assertEqual(result, False)
def test_isdisjoint_overlap(self):
result = self.set.isdisjoint(set([3, 4, 5]))
self.assertEqual(result, False)
def test_isdisjoint_non_overlap(self):
result = self.set.isdisjoint(set([8]))
self.assertEqual(result, True)
def test_sym_difference_subset(self):
result = self.set ^ set((2, 4))
self.assertEqual(result, set([6]))
def test_sym_difference_superset(self):
result = self.set ^ set((2, 4, 6, 8))
self.assertEqual(result, set([8]))
def test_sym_difference_overlap(self):
result = self.set ^ set((3, 4, 5))
self.assertEqual(result, set([2, 3, 5, 6]))
def test_sym_difference_non_overlap(self):
result = self.set ^ set([8])
self.assertEqual(result, set([2, 4, 6, 8]))
#==============================================================================
class TestUpdateOps(unittest.TestCase):
def setUp(self):
self.set = set((2, 4, 6))
def test_union_subset(self):
self.set |= set([2])
self.assertEqual(self.set, set((2, 4, 6)))
def test_union_superset(self):
self.set |= set([2, 4, 6, 8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_union_overlap(self):
self.set |= set([3, 4, 5])
self.assertEqual(self.set, set([2, 3, 4, 5, 6]))
def test_union_non_overlap(self):
self.set |= set([8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_union_method_call(self):
self.set.update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 3, 4, 5, 6]))
def test_intersection_subset(self):
self.set &= set((2, 4))
self.assertEqual(self.set, set((2, 4)))
def test_intersection_superset(self):
self.set &= set([2, 4, 6, 8])
self.assertEqual(self.set, set([2, 4, 6]))
def test_intersection_overlap(self):
self.set &= set([3, 4, 5])
self.assertEqual(self.set, set([4]))
def test_intersection_non_overlap(self):
self.set &= set([8])
self.assertEqual(self.set, empty_set)
def test_intersection_method_call(self):
self.set.intersection_update(set([3, 4, 5]))
self.assertEqual(self.set, set([4]))
def test_sym_difference_subset(self):
self.set ^= set((2, 4))
self.assertEqual(self.set, set([6]))
def test_sym_difference_superset(self):
self.set ^= set((2, 4, 6, 8))
self.assertEqual(self.set, set([8]))
def test_sym_difference_overlap(self):
self.set ^= set((3, 4, 5))
self.assertEqual(self.set, set([2, 3, 5, 6]))
def test_sym_difference_non_overlap(self):
self.set ^= set([8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_sym_difference_method_call(self):
self.set.symmetric_difference_update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 3, 5, 6]))
def test_difference_subset(self):
self.set -= set((2, 4))
self.assertEqual(self.set, set([6]))
def test_difference_superset(self):
self.set -= set((2, 4, 6, 8))
self.assertEqual(self.set, set([]))
def test_difference_overlap(self):
self.set -= set((3, 4, 5))
self.assertEqual(self.set, set([2, 6]))
def test_difference_non_overlap(self):
self.set -= set([8])
self.assertEqual(self.set, set([2, 4, 6]))
def test_difference_method_call(self):
self.set.difference_update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 6]))
#==============================================================================
class TestMutate(unittest.TestCase):
def setUp(self):
self.values = ["a", "b", "c"]
self.set = set(self.values)
def test_add_present(self):
self.set.add("c")
self.assertEqual(self.set, set("abc"))
def test_add_absent(self):
self.set.add("d")
self.assertEqual(self.set, set("abcd"))
def test_add_until_full(self):
tmp = set()
expected_len = 0
for v in self.values:
tmp.add(v)
expected_len += 1
self.assertEqual(len(tmp), expected_len)
self.assertEqual(tmp, self.set)
def test_remove_present(self):
self.set.remove("b")
self.assertEqual(self.set, set("ac"))
def test_remove_absent(self):
try:
self.set.remove("d")
self.fail("Removing missing element should have raised LookupError")
except LookupError:
pass
def test_remove_until_empty(self):
expected_len = len(self.set)
for v in self.values:
self.set.remove(v)
expected_len -= 1
self.assertEqual(len(self.set), expected_len)
def test_discard_present(self):
self.set.discard("c")
self.assertEqual(self.set, set("ab"))
def test_discard_absent(self):
self.set.discard("d")
self.assertEqual(self.set, set("abc"))
def test_clear(self):
self.set.clear()
self.assertEqual(len(self.set), 0)
def test_pop(self):
popped = {}
while self.set:
popped[self.set.pop()] = None
self.assertEqual(len(popped), len(self.values))
for v in self.values:
self.assertIn(v, popped)
def test_update_empty_tuple(self):
self.set.update(())
self.assertEqual(self.set, set(self.values))
def test_update_unit_tuple_overlap(self):
self.set.update(("a",))
self.assertEqual(self.set, set(self.values))
def test_update_unit_tuple_non_overlap(self):
self.set.update(("a", "z"))
self.assertEqual(self.set, set(self.values + ["z"]))
#==============================================================================
class TestSubsets:
case2method = {"<=": "issubset",
">=": "issuperset",
}
reverse = {"==": "==",
"!=": "!=",
"<": ">",
">": "<",
"<=": ">=",
">=": "<=",
}
def test_issubset(self):
x = self.left
y = self.right
for case in "!=", "==", "<", "<=", ">", ">=":
expected = case in self.cases
# Test the binary infix spelling.
result = eval("x" + case + "y", locals())
self.assertEqual(result, expected)
# Test the "friendly" method-name spelling, if one exists.
if case in TestSubsets.case2method:
method = getattr(x, TestSubsets.case2method[case])
result = method(y)
self.assertEqual(result, expected)
# Now do the same for the operands reversed.
rcase = TestSubsets.reverse[case]
result = eval("y" + rcase + "x", locals())
self.assertEqual(result, expected)
if rcase in TestSubsets.case2method:
method = getattr(y, TestSubsets.case2method[rcase])
result = method(x)
self.assertEqual(result, expected)
#------------------------------------------------------------------------------
class TestSubsetEqualEmpty(TestSubsets, unittest.TestCase):
left = set()
right = set()
name = "both empty"
cases = "==", "<=", ">="
#------------------------------------------------------------------------------
class TestSubsetEqualNonEmpty(TestSubsets, unittest.TestCase):
left = set([1, 2])
right = set([1, 2])
name = "equal pair"
cases = "==", "<=", ">="
#------------------------------------------------------------------------------
class TestSubsetEmptyNonEmpty(TestSubsets, unittest.TestCase):
left = set()
right = set([1, 2])
name = "one empty, one non-empty"
cases = "!=", "<", "<="
#------------------------------------------------------------------------------
class TestSubsetPartial(TestSubsets, unittest.TestCase):
left = set([1])
right = set([1, 2])
name = "one a non-empty proper subset of other"
cases = "!=", "<", "<="
#------------------------------------------------------------------------------
class TestSubsetNonOverlap(TestSubsets, unittest.TestCase):
left = set([1])
right = set([2])
name = "neither empty, neither contains"
cases = "!="
#==============================================================================
class TestOnlySetsInBinaryOps:
def test_eq_ne(self):
# Unlike the others, this is testing that == and != *are* allowed.
self.assertEqual(self.other == self.set, False)
self.assertEqual(self.set == self.other, False)
self.assertEqual(self.other != self.set, True)
self.assertEqual(self.set != self.other, True)
def test_ge_gt_le_lt(self):
self.assertRaises(TypeError, lambda: self.set < self.other)
self.assertRaises(TypeError, lambda: self.set <= self.other)
self.assertRaises(TypeError, lambda: self.set > self.other)
self.assertRaises(TypeError, lambda: self.set >= self.other)
self.assertRaises(TypeError, lambda: self.other < self.set)
self.assertRaises(TypeError, lambda: self.other <= self.set)
self.assertRaises(TypeError, lambda: self.other > self.set)
self.assertRaises(TypeError, lambda: self.other >= self.set)
def test_update_operator(self):
try:
self.set |= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_update(self):
if self.otherIsIterable:
self.set.update(self.other)
else:
self.assertRaises(TypeError, self.set.update, self.other)
def test_union(self):
self.assertRaises(TypeError, lambda: self.set | self.other)
self.assertRaises(TypeError, lambda: self.other | self.set)
if self.otherIsIterable:
self.set.union(self.other)
else:
self.assertRaises(TypeError, self.set.union, self.other)
def test_intersection_update_operator(self):
try:
self.set &= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_intersection_update(self):
if self.otherIsIterable:
self.set.intersection_update(self.other)
else:
self.assertRaises(TypeError,
self.set.intersection_update,
self.other)
def test_intersection(self):
self.assertRaises(TypeError, lambda: self.set & self.other)
self.assertRaises(TypeError, lambda: self.other & self.set)
if self.otherIsIterable:
self.set.intersection(self.other)
else:
self.assertRaises(TypeError, self.set.intersection, self.other)
def test_sym_difference_update_operator(self):
try:
self.set ^= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_sym_difference_update(self):
if self.otherIsIterable:
self.set.symmetric_difference_update(self.other)
else:
self.assertRaises(TypeError,
self.set.symmetric_difference_update,
self.other)
def test_sym_difference(self):
self.assertRaises(TypeError, lambda: self.set ^ self.other)
self.assertRaises(TypeError, lambda: self.other ^ self.set)
if self.otherIsIterable:
self.set.symmetric_difference(self.other)
else:
self.assertRaises(TypeError, self.set.symmetric_difference, self.other)
def test_difference_update_operator(self):
try:
self.set -= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_difference_update(self):
if self.otherIsIterable:
self.set.difference_update(self.other)
else:
self.assertRaises(TypeError,
self.set.difference_update,
self.other)
def test_difference(self):
self.assertRaises(TypeError, lambda: self.set - self.other)
self.assertRaises(TypeError, lambda: self.other - self.set)
if self.otherIsIterable:
self.set.difference(self.other)
else:
self.assertRaises(TypeError, self.set.difference, self.other)
#------------------------------------------------------------------------------
class TestOnlySetsNumeric(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = 19
self.otherIsIterable = False
#------------------------------------------------------------------------------
class TestOnlySetsDict(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = {1:2, 3:4}
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsOperator(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = operator.add
self.otherIsIterable = False
#------------------------------------------------------------------------------
class TestOnlySetsTuple(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = (2, 4, 6)
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsString(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = 'abc'
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsGenerator(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
def gen():
for i in range(0, 10, 2):
yield i
self.set = set((1, 2, 3))
self.other = gen()
self.otherIsIterable = True
#==============================================================================
class TestCopying:
def test_copy(self):
dup = self.set.copy()
dup_list = sorted(dup, key=repr)
set_list = sorted(self.set, key=repr)
self.assertEqual(len(dup_list), len(set_list))
for i in range(len(dup_list)):
self.assertTrue(dup_list[i] is set_list[i])
def test_deep_copy(self):
dup = copy.deepcopy(self.set)
dup_list = sorted(dup, key=repr)
set_list = sorted(self.set, key=repr)
self.assertEqual(len(dup_list), len(set_list))
for i in range(len(dup_list)):
self.assertEqual(dup_list[i], set_list[i])
#------------------------------------------------------------------------------
class TestCopyingEmpty(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set()
#------------------------------------------------------------------------------
class TestCopyingSingleton(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set(["hello"])
#------------------------------------------------------------------------------
class TestCopyingTriple(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set(["zero", 0, None])
#------------------------------------------------------------------------------
class TestCopyingTuple(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set([(1, 2)])
#------------------------------------------------------------------------------
class TestCopyingNested(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set([((1, 2), (3, 4))])
#==============================================================================
class TestIdentities(unittest.TestCase):
def setUp(self):
self.a = set('abracadabra')
self.b = set('alacazam')
def test_binopsVsSubsets(self):
a, b = self.a, self.b
self.assertTrue(a - b < a)
self.assertTrue(b - a < b)
self.assertTrue(a & b < a)
self.assertTrue(a & b < b)
self.assertTrue(a | b > a)
self.assertTrue(a | b > b)
self.assertTrue(a ^ b < a | b)
def test_commutativity(self):
a, b = self.a, self.b
self.assertEqual(a&b, b&a)
self.assertEqual(a|b, b|a)
self.assertEqual(a^b, b^a)
if a != b:
self.assertNotEqual(a-b, b-a)
def test_summations(self):
# check that sums of parts equal the whole
a, b = self.a, self.b
self.assertEqual((a-b)|(a&b)|(b-a), a|b)
self.assertEqual((a&b)|(a^b), a|b)
self.assertEqual(a|(b-a), a|b)
self.assertEqual((a-b)|b, a|b)
self.assertEqual((a-b)|(a&b), a)
self.assertEqual((b-a)|(a&b), b)
self.assertEqual((a-b)|(b-a), a^b)
def test_exclusion(self):
# check that inverse operations show non-overlap
a, b, zero = self.a, self.b, set()
self.assertEqual((a-b)&b, zero)
self.assertEqual((b-a)&a, zero)
self.assertEqual((a&b)&(a^b), zero)
# Tests derived from test_itertools.py =======================================
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing __next__()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
3 // 0
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def __next__(self):
raise StopIteration
from itertools import chain
def L(seqn):
'Test multiple tiers of iterators'
return chain(map(lambda x:x, R(Ig(G(seqn)))))
class TestVariousIteratorArgs(unittest.TestCase):
def test_constructor(self):
for cons in (set, frozenset):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(sorted(cons(g(s)), key=repr), sorted(g(s), key=repr))
self.assertRaises(TypeError, cons, X(s))
self.assertRaises(TypeError, cons, N(s))
self.assertRaises(ZeroDivisionError, cons, E(s))
def test_inline_methods(self):
s = set('november')
for data in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5), 'december'):
for meth in (s.union, s.intersection, s.difference, s.symmetric_difference, s.isdisjoint):
for g in (G, I, Ig, L, R):
expected = meth(data)
actual = meth(g(data))
if isinstance(expected, bool):
self.assertEqual(actual, expected)
else:
self.assertEqual(sorted(actual, key=repr), sorted(expected, key=repr))
self.assertRaises(TypeError, meth, X(s))
self.assertRaises(TypeError, meth, N(s))
self.assertRaises(ZeroDivisionError, meth, E(s))
def test_inplace_methods(self):
for data in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5), 'december'):
for methname in ('update', 'intersection_update',
'difference_update', 'symmetric_difference_update'):
for g in (G, I, Ig, S, L, R):
s = set('january')
t = s.copy()
getattr(s, methname)(list(g(data)))
getattr(t, methname)(g(data))
self.assertEqual(sorted(s, key=repr), sorted(t, key=repr))
self.assertRaises(TypeError, getattr(set('january'), methname), X(data))
self.assertRaises(TypeError, getattr(set('january'), methname), N(data))
self.assertRaises(ZeroDivisionError, getattr(set('january'), methname), E(data))
class bad_eq:
def __eq__(self, other):
if be_bad:
set2.clear()
raise ZeroDivisionError
return self is other
def __hash__(self):
return 0
class bad_dict_clear:
def __eq__(self, other):
if be_bad:
dict2.clear()
return self is other
def __hash__(self):
return 0
class TestWeirdBugs(unittest.TestCase):
def test_8420_set_merge(self):
# This used to segfault
global be_bad, set2, dict2
be_bad = False
set1 = {bad_eq()}
set2 = {bad_eq() for i in range(75)}
be_bad = True
self.assertRaises(ZeroDivisionError, set1.update, set2)
be_bad = False
set1 = {bad_dict_clear()}
dict2 = {bad_dict_clear(): None}
be_bad = True
set1.symmetric_difference_update(dict2)
# Application tests (based on David Eppstein's graph recipes) ================================
def powerset(U):
"""Generates all subsets of a set or sequence U."""
U = iter(U)
try:
x = frozenset([next(U)])
for S in powerset(U):
yield S
yield S | x
except StopIteration:
yield frozenset()
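# Note (added commentary, not part of the original test module): powerset(U)
# yields all 2**len(U) subsets of U as frozensets; e.g. the subsets of 'ab'
# are frozenset(), {'a'}, {'b'} and {'a', 'b'}.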
def cube(n):
"""Graph of n-dimensional hypercube."""
singletons = [frozenset([x]) for x in range(n)]
return dict([(x, frozenset([x^s for s in singletons]))
for x in powerset(range(n))])
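# Note (added commentary): cube() keys the graph by frozensets of coordinate
# indices; x ^ s with a singleton frozenset is a symmetric difference that
# flips one coordinate, so each vertex gets exactly n neighbours.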
def linegraph(G):
"""Graph, the vertices of which are edges of G,
with two vertices being adjacent iff the corresponding
edges share a vertex."""
L = {}
for x in G:
for y in G[x]:
nx = [frozenset([x,z]) for z in G[x] if z != y]
ny = [frozenset([y,z]) for z in G[y] if z != x]
L[frozenset([x,y])] = frozenset(nx+ny)
return L
def faces(G):
'Return a set of faces in G, where each face is a frozenset of the vertices on that face'
# currently limited to triangles, squares, and pentagons
f = set()
for v1, edges in G.items():
for v2 in edges:
for v3 in G[v2]:
if v1 == v3:
continue
if v1 in G[v3]:
f.add(frozenset([v1, v2, v3]))
else:
for v4 in G[v3]:
if v4 == v2:
continue
if v1 in G[v4]:
f.add(frozenset([v1, v2, v3, v4]))
else:
for v5 in G[v4]:
if v5 == v3 or v5 == v2:
continue
if v1 in G[v5]:
f.add(frozenset([v1, v2, v3, v4, v5]))
return f
class TestGraphs(unittest.TestCase):
def test_cube(self):
g = cube(3) # vert --> {v1, v2, v3}
vertices1 = set(g)
self.assertEqual(len(vertices1), 8) # eight vertices
for edge in g.values():
self.assertEqual(len(edge), 3) # each vertex connects to three edges
vertices2 = set(v for edges in g.values() for v in edges)
self.assertEqual(vertices1, vertices2) # edge vertices in original set
cubefaces = faces(g)
self.assertEqual(len(cubefaces), 6) # six faces
for face in cubefaces:
self.assertEqual(len(face), 4) # each face is a square
def test_cuboctahedron(self):
# http://en.wikipedia.org/wiki/Cuboctahedron
# 8 triangular faces and 6 square faces
# 12 identical vertices each connecting a triangle and square
g = cube(3)
cuboctahedron = linegraph(g) # vert --> {v1, v2, v3, v4}
self.assertEqual(len(cuboctahedron), 12)# twelve vertices
vertices = set(cuboctahedron)
for edges in cuboctahedron.values():
self.assertEqual(len(edges), 4) # each vertex connects to four other vertices
othervertices = set(edge for edges in cuboctahedron.values() for edge in edges)
self.assertEqual(vertices, othervertices) # edge vertices in original set
cubofaces = faces(cuboctahedron)
facesizes = collections.defaultdict(int)
for face in cubofaces:
facesizes[len(face)] += 1
self.assertEqual(facesizes[3], 8) # eight triangular faces
self.assertEqual(facesizes[4], 6) # six square faces
for vertex in cuboctahedron:
edge = vertex # Cuboctahedron vertices are edges in Cube
self.assertEqual(len(edge), 2) # Two cube vertices define an edge
for cubevert in edge:
self.assertIn(cubevert, g)
#==============================================================================
if __name__ == "__main__":
unittest.main()
|
birocorneliu/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/brightcove.py
|
89
|
# encoding: utf-8
from __future__ import unicode_literals
import re
import json
import xml.etree.ElementTree
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_str,
compat_urllib_parse,
compat_urllib_parse_urlparse,
compat_urllib_request,
compat_urlparse,
compat_xml_parse_error,
)
from ..utils import (
determine_ext,
ExtractorError,
find_xpath_attr,
fix_xml_ampersands,
unescapeHTML,
unsmuggle_url,
)
class BrightcoveIE(InfoExtractor):
_VALID_URL = r'(?:https?://.*brightcove\.com/(services|viewer).*?\?|brightcove:)(?P<query>.*)'
_FEDERATED_URL_TEMPLATE = 'http://c.brightcove.com/services/viewer/htmlFederated?%s'
_TESTS = [
{
# From http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001',
'md5': '5423e113865d26e40624dce2e4b45d95',
'note': 'Test Brightcove downloads and detection in GenericIE',
'info_dict': {
'id': '2371591881001',
'ext': 'mp4',
'title': 'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”',
'uploader': '8TV',
'description': 'md5:a950cc4285c43e44d763d036710cd9cd',
}
},
{
# From http://medianetwork.oracle.com/video/player/1785452137001
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001',
'info_dict': {
'id': '1785452137001',
'ext': 'flv',
'title': 'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges',
'description': 'John Rose speaks at the JVM Language Summit, August 1, 2012.',
'uploader': 'Oracle',
},
},
{
# From http://mashable.com/2013/10/26/thermoelectric-bracelet-lets-you-control-your-body-temperature/
'url': 'http://c.brightcove.com/services/viewer/federated_f9?&playerID=1265504713001&publisherID=AQ%7E%7E%2CAAABBzUwv1E%7E%2CxP-xFHVUstiMFlNYfvF4G9yFnNaqCw_9&videoID=2750934548001',
'info_dict': {
'id': '2750934548001',
'ext': 'mp4',
'title': 'This Bracelet Acts as a Personal Thermostat',
'description': 'md5:547b78c64f4112766ccf4e151c20b6a0',
'uploader': 'Mashable',
},
},
{
# test that the default referer works
# from http://national.ballet.ca/interact/video/Lost_in_Motion_II/
'url': 'http://link.brightcove.com/services/player/bcpid756015033001?bckey=AQ~~,AAAApYJi_Ck~,GxhXCegT1Dp39ilhXuxMJxasUhVNZiil&bctid=2878862109001',
'info_dict': {
'id': '2878862109001',
'ext': 'mp4',
'title': 'Lost in Motion II',
'description': 'md5:363109c02998fee92ec02211bd8000df',
'uploader': 'National Ballet of Canada',
},
},
{
# test flv videos served by akamaihd.net
# From http://www.redbull.com/en/bike/stories/1331655643987/replay-uci-dh-world-cup-2014-from-fort-william
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?%40videoPlayer=ref%3ABC2996102916001&linkBaseURL=http%3A%2F%2Fwww.redbull.com%2Fen%2Fbike%2Fvideos%2F1331655630249%2Freplay-uci-fort-william-2014-dh&playerKey=AQ%7E%7E%2CAAAApYJ7UqE%7E%2Cxqr_zXk0I-zzNndy8NlHogrCb5QdyZRf&playerID=1398061561001#__youtubedl_smuggle=%7B%22Referer%22%3A+%22http%3A%2F%2Fwww.redbull.com%2Fen%2Fbike%2Fstories%2F1331655643987%2Freplay-uci-dh-world-cup-2014-from-fort-william%22%7D',
# The md5 checksum changes on each download
'info_dict': {
'id': '2996102916001',
'ext': 'flv',
'title': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals',
'uploader': 'Red Bull TV',
'description': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals',
},
},
{
# playlist test
# from http://support.brightcove.com/en/video-cloud/docs/playlist-support-single-video-players
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=3550052898001&playerKey=AQ%7E%7E%2CAAABmA9XpXk%7E%2C-Kp7jNgisre1fG5OdqpAFUTcs0lP_ZoL',
'info_dict': {
'title': 'Sealife',
'id': '3550319591001',
},
'playlist_mincount': 7,
},
]
@classmethod
def _build_brighcove_url(cls, object_str):
"""
Build a Brightcove URL from an XML string containing
<object class="BrightcoveExperience">{params}</object>
"""
# Fix up some stupid HTML, see https://github.com/rg3/youtube-dl/issues/1553
object_str = re.sub(r'(<param(?:\s+[a-zA-Z0-9_]+="[^"]*")*)>',
lambda m: m.group(1) + '/>', object_str)
# Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608
object_str = object_str.replace('<--', '<!--')
# remove namespace to simplify extraction
object_str = re.sub(r'(<object[^>]*)(xmlns=".*?")', r'\1', object_str)
object_str = fix_xml_ampersands(object_str)
try:
object_doc = xml.etree.ElementTree.fromstring(object_str.encode('utf-8'))
except compat_xml_parse_error:
return
fv_el = find_xpath_attr(object_doc, './param', 'name', 'flashVars')
if fv_el is not None:
flashvars = dict(
(k, v[0])
for k, v in compat_parse_qs(fv_el.attrib['value']).items())
else:
flashvars = {}
def find_param(name):
if name in flashvars:
return flashvars[name]
node = find_xpath_attr(object_doc, './param', 'name', name)
if node is not None:
return node.attrib['value']
return None
params = {}
playerID = find_param('playerID')
if playerID is None:
raise ExtractorError('Cannot find player ID')
params['playerID'] = playerID
playerKey = find_param('playerKey')
# Not all pages define this value
if playerKey is not None:
params['playerKey'] = playerKey
# The three fields hold the id of the video
videoPlayer = find_param('@videoPlayer') or find_param('videoId') or find_param('videoID')
if videoPlayer is not None:
params['@videoPlayer'] = videoPlayer
linkBase = find_param('linkBaseURL')
if linkBase is not None:
params['linkBaseURL'] = linkBase
return cls._make_brightcove_url(params)
@classmethod
def _build_brighcove_url_from_js(cls, object_js):
# The layout of JS is as follows:
# customBC.createVideo = function (width, height, playerID, playerKey, videoPlayer, VideoRandomID) {
# // build Brightcove <object /> XML
# }
m = re.search(
r'''(?x)customBC\.createVideo\(
.*? # skipping width and height
["\'](?P<playerID>\d+)["\']\s*,\s* # playerID
["\'](?P<playerKey>AQ[^"\']{48})[^"\']*["\']\s*,\s* # playerKey begins with AQ and is 50 characters
# in length, however it's appended to itself
# in places, so truncate
["\'](?P<videoID>\d+)["\'] # @videoPlayer
''', object_js)
if m:
return cls._make_brightcove_url(m.groupdict())
@classmethod
def _make_brightcove_url(cls, params):
data = compat_urllib_parse.urlencode(params)
return cls._FEDERATED_URL_TEMPLATE % data
@classmethod
def _extract_brightcove_url(cls, webpage):
"""Try to extract the brightcove url from the webpage, returns None
if it can't be found
"""
urls = cls._extract_brightcove_urls(webpage)
return urls[0] if urls else None
@classmethod
def _extract_brightcove_urls(cls, webpage):
"""Return a list of all Brightcove URLs from the webpage """
url_m = re.search(
r'<meta\s+property=[\'"]og:video[\'"]\s+content=[\'"](https?://(?:secure|c)\.brightcove\.com/[^\'"]+)[\'"]',
webpage)
if url_m:
url = unescapeHTML(url_m.group(1))
# Some sites don't add it; we can't download with this URL, for example:
# http://www.ktvu.com/videos/news/raw-video-caltrain-releases-video-of-man-almost/vCTZdY/
if 'playerKey' in url or 'videoId' in url:
return [url]
matches = re.findall(
r'''(?sx)<object
(?:
[^>]+?class=[\'"][^>]*?BrightcoveExperience.*?[\'"] |
[^>]*?>\s*<param\s+name="movie"\s+value="https?://[^/]*brightcove\.com/
).+?>\s*</object>''',
webpage)
if matches:
return list(filter(None, [cls._build_brighcove_url(m) for m in matches]))
return list(filter(None, [
cls._build_brighcove_url_from_js(custom_bc)
for custom_bc in re.findall(r'(customBC\.createVideo\(.+?\);)', webpage)]))
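# Note (added commentary): the extraction above is a three-stage fallback:
# an og:video <meta> tag (kept only if it carries playerKey/videoId), then
# <object> embeds of the BrightcoveExperience player, and finally
# customBC.createVideo(...) JavaScript calls.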
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
# Change 'videoId' and other fields to '@videoPlayer'
url = re.sub(r'(?<=[?&])(videoI(d|D)|bctid)', '%40videoPlayer', url)
# Change bckey (used by bcove.me urls) to playerKey
url = re.sub(r'(?<=[?&])bckey', 'playerKey', url)
mobj = re.match(self._VALID_URL, url)
query_str = mobj.group('query')
query = compat_urlparse.parse_qs(query_str)
videoPlayer = query.get('@videoPlayer')
if videoPlayer:
# We set the original url as the default 'Referer' header
referer = smuggled_data.get('Referer', url)
return self._get_video_info(
videoPlayer[0], query_str, query, referer=referer)
elif 'playerKey' in query:
player_key = query['playerKey']
return self._get_playlist_info(player_key[0])
else:
raise ExtractorError(
'Cannot find playerKey= variable. Did you forget quotes in a shell invocation?',
expected=True)
def _get_video_info(self, video_id, query_str, query, referer=None):
request_url = self._FEDERATED_URL_TEMPLATE % query_str
req = compat_urllib_request.Request(request_url)
linkBase = query.get('linkBaseURL')
if linkBase is not None:
referer = linkBase[0]
if referer is not None:
req.add_header('Referer', referer)
webpage = self._download_webpage(req, video_id)
error_msg = self._html_search_regex(
r"<h1>We're sorry.</h1>([\s\n]*<p>.*?</p>)+", webpage,
'error message', default=None)
if error_msg is not None:
raise ExtractorError(
'brightcove said: %s' % error_msg, expected=True)
self.report_extraction(video_id)
info = self._search_regex(r'var experienceJSON = ({.*});', webpage, 'json')
info = json.loads(info)['data']
video_info = info['programmedContent']['videoPlayer']['mediaDTO']
video_info['_youtubedl_adServerURL'] = info.get('adServerURL')
return self._extract_video_info(video_info)
def _get_playlist_info(self, player_key):
info_url = 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=%s' % player_key
playlist_info = self._download_webpage(
info_url, player_key, 'Downloading playlist information')
json_data = json.loads(playlist_info)
if 'videoList' not in json_data:
raise ExtractorError('Empty playlist')
playlist_info = json_data['videoList']
videos = [self._extract_video_info(video_info) for video_info in playlist_info['mediaCollectionDTO']['videoDTOs']]
return self.playlist_result(videos, playlist_id='%s' % playlist_info['id'],
playlist_title=playlist_info['mediaCollectionDTO']['displayName'])
def _extract_video_info(self, video_info):
info = {
'id': compat_str(video_info['id']),
'title': video_info['displayName'].strip(),
'description': video_info.get('shortDescription'),
'thumbnail': video_info.get('videoStillURL') or video_info.get('thumbnailURL'),
'uploader': video_info.get('publisherName'),
}
renditions = video_info.get('renditions')
if renditions:
formats = []
for rend in renditions:
url = rend['defaultURL']
if not url:
continue
ext = None
if rend['remote']:
url_comp = compat_urllib_parse_urlparse(url)
if url_comp.path.endswith('.m3u8'):
formats.extend(
self._extract_m3u8_formats(url, info['id'], 'mp4'))
continue
elif 'akamaihd.net' in url_comp.netloc:
# These renditions are served through
# akamaihd.net, but they don't use f4m manifests
url = url.replace('control/', '') + '?&v=3.3.0&fp=13&r=FEEFJ&g=RTSJIMBMPFPB'
ext = 'flv'
if ext is None:
ext = determine_ext(url)
size = rend.get('size')
formats.append({
'url': url,
'ext': ext,
'height': rend.get('frameHeight'),
'width': rend.get('frameWidth'),
'filesize': size if size != 0 else None,
})
self._sort_formats(formats)
info['formats'] = formats
elif video_info.get('FLVFullLengthURL') is not None:
info.update({
'url': video_info['FLVFullLengthURL'],
})
if self._downloader.params.get('include_ads', False):
adServerURL = video_info.get('_youtubedl_adServerURL')
if adServerURL:
ad_info = {
'_type': 'url',
'url': adServerURL,
}
if 'url' in info:
return {
'_type': 'playlist',
'title': info['title'],
'entries': [ad_info, info],
}
else:
return ad_info
if 'url' not in info and not info.get('formats'):
raise ExtractorError('Unable to extract video url for %s' % info['id'])
return info
|
xNovax/SickRage
|
refs/heads/master
|
sickbeard/notifiers/pytivo.py
|
1
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import os
import sickbeard
from urllib import urlencode
from urllib2 import Request, urlopen, HTTPError
from sickbeard import logger
from sickbeard.exceptions import ex
from sickrage.helper.encoding import ek
class pyTivoNotifier:
def notify_snatch(self, ep_name):
pass
def notify_download(self, ep_name):
pass
def notify_subtitle_download(self, ep_name, lang):
pass
def notify_git_update(self, new_version):
pass
def update_library(self, ep_obj):
# Values from config
if not sickbeard.USE_PYTIVO:
return False
host = sickbeard.PYTIVO_HOST
shareName = sickbeard.PYTIVO_SHARE_NAME
tsn = sickbeard.PYTIVO_TIVO_NAME
# There are two more values required, the container and file.
#
# container: The share name, show name and season
#
# file: The file name
#
# Some slicing and dicing of variables is required to get at these values.
#
# There might be better ways to arrive at the values, but this is the best I have been able to
# come up with.
#
# Calculated values
showPath = ep_obj.show.location
showName = ep_obj.show.name
rootShowAndSeason = ek(os.path.dirname, ep_obj.location)
absPath = ep_obj.location
# Some show names have colons in them which are illegal in a path location, so strip them out.
# (Are there other characters?)
showName = showName.replace(":", "")
root = showPath.replace(showName, "")
showAndSeason = rootShowAndSeason.replace(root, "")
container = shareName + "/" + showAndSeason
file = "/" + absPath.replace(root, "")
# Finally create the url and make request
requestUrl = "http://" + host + "/TiVoConnect?" + urlencode(
{'Command': 'Push', 'Container': container, 'File': file, 'tsn': tsn})
logger.log(u"pyTivo notification: Requesting " + requestUrl, logger.DEBUG)
request = Request(requestUrl)
try:
response = urlopen(request) #@UnusedVariable
except HTTPError as e:
if hasattr(e, 'reason'):
logger.log(u"pyTivo notification: Error, failed to reach a server - " + e.reason, logger.ERROR)
return False
elif hasattr(e, 'code'):
logger.log(u"pyTivo notification: Error, the server couldn't fulfill the request - " + e.code, logger.ERROR)
return False
except Exception as e:
logger.log(u"PYTIVO: Unknown exception: " + ex(e), logger.ERROR)
return False
else:
logger.log(u"pyTivo notification: Successfully requested transfer of file")
return True
notifier = pyTivoNotifier
|
manashmndl/scikit-learn
|
refs/heads/master
|
examples/mixture/plot_gmm.py
|
248
|
"""
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
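# Note (added commentary): the sample mixes two clusters: standard normal noise
# sheared by C (a tilted, anisotropic blob) and an isotropic blob with standard
# deviation 0.7 centred at (-6, 3).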
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = plt.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.show()
|
julian-seward1/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/lint/lint.py
|
23
|
from __future__ import print_function, unicode_literals
import abc
import argparse
import ast
import itertools
import json
import os
import re
import subprocess
import sys
from collections import defaultdict
from . import fnmatch
from ..localpaths import repo_root
from ..gitignore.gitignore import PathFilter
from manifest.sourcefile import SourceFile, js_meta_re, python_meta_re
from six import binary_type, iteritems, itervalues
from six.moves import range
from six.moves.urllib.parse import urlsplit, urljoin
import logging
logger = None
def setup_logging(prefix=False):
global logger
if logger is None:
logger = logging.getLogger(os.path.basename(os.path.splitext(__file__)[0]))
handler = logging.StreamHandler(sys.stdout)
logger.addHandler(handler)
if prefix:
format = logging.BASIC_FORMAT
else:
format = "%(message)s"
formatter = logging.Formatter(format)
for handler in logger.handlers:
handler.setFormatter(formatter)
logger.setLevel(logging.DEBUG)
setup_logging()
ERROR_MSG = """You must fix all errors; for details on how to fix them, see
https://github.com/w3c/web-platform-tests/blob/master/docs/lint-tool.md
However, instead of fixing a particular error, it's sometimes
OK to add a line to the lint.whitelist file in the root of the
web-platform-tests directory to make the lint tool ignore it.
For example, to make the lint tool ignore all '%s'
errors in the %s file,
you could add the following line to the lint.whitelist file.
%s:%s"""
def all_filesystem_paths(repo_root):
path_filter = PathFilter(repo_root, extras=[".git/*"])
for dirpath, dirnames, filenames in os.walk(repo_root):
for filename in filenames:
path = os.path.relpath(os.path.join(dirpath, filename), repo_root)
if path_filter(path):
yield path
dirnames[:] = [item for item in dirnames if
path_filter(os.path.relpath(os.path.join(dirpath, item) + "/",
repo_root))]
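# Note (added commentary): the in-place slice assignment to dirnames above is
# the documented os.walk idiom for pruning traversal; directories rejected by
# path_filter are never descended into.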
def _all_files_equal(paths):
"""
Checks that all the paths are files that are byte-for-byte identical
:param paths: the list of paths to compare
:returns: True if they are all identical
"""
paths = list(paths)
if len(paths) < 2:
return True
first = paths.pop()
size = os.path.getsize(first)
if any(os.path.getsize(path) != size for path in paths):
return False
# Chunk this to avoid eating up memory and file descriptors
bufsize = 4096*4 # 16KB, a "reasonable" number of disk sectors
groupsize = 8 # Hypothesised to be large enough in the common case that everything fits in one group
with open(first, "rb") as first_f:
for start in range(0, len(paths), groupsize):
path_group = paths[start:start+groupsize]
first_f.seek(0)
try:
files = [open(x, "rb") for x in path_group]
for _ in range(0, size, bufsize):
a = first_f.read(bufsize)
for f in files:
b = f.read(bufsize)
if a != b:
return False
finally:
for f in files:
f.close()
return True
def check_path_length(repo_root, path, css_mode):
if len(path) + 1 > 150:
return [("PATH LENGTH", "/%s longer than maximum path length (%d > 150)" % (path, len(path) + 1), path, None)]
return []
def check_worker_collision(repo_root, path, css_mode):
endings = [(".any.html", ".any.js"),
(".any.worker.html", ".any.js"),
(".worker.html", ".worker.js")]
for path_ending, generated in endings:
if path.endswith(path_ending):
return [("WORKER COLLISION",
"path ends with %s which collides with generated tests from %s files" % (path_ending, generated),
path,
None)]
return []
drafts_csswg_re = re.compile(r"https?\:\/\/drafts\.csswg\.org\/([^/?#]+)")
w3c_tr_re = re.compile(r"https?\:\/\/www\.w3c?\.org\/TR\/([^/?#]+)")
w3c_dev_re = re.compile(r"https?\:\/\/dev\.w3c?\.org\/[^/?#]+\/([^/?#]+)")
def check_css_globally_unique(repo_root, paths, css_mode):
"""
Checks that CSS filenames are sufficiently unique
This groups files by path classifying them as "test", "reference", or
"support".
"test" files must have a unique name across files that share links to the
same spec.
"reference" and "support" files, on the other hand, must have globally
unique names.
:param repo_root: the repository root
:param paths: list of all paths
:param css_mode: whether we're in CSS testsuite mode
:returns: a list of errors found in ``paths``
"""
test_files = defaultdict(set)
ref_files = defaultdict(set)
support_files = defaultdict(set)
for path in paths:
if os.name == "nt":
path = path.replace("\\", "/")
if not css_mode:
if not path.startswith("css/"):
continue
# we're within css or in css_mode after all that
source_file = SourceFile(repo_root, path, "/")
if source_file.name_is_non_test:
# If we're name_is_non_test for a reason apart from support, ignore it.
# We care about support because of the requirement that all support files in css/ be in
# a support directory; see the start of check_parsed.
offset = path.find("/support/")
if offset == -1:
continue
parts = source_file.dir_path.split(os.path.sep)
if (parts[0] in source_file.root_dir_non_test or
any(item in source_file.dir_non_test - {"support"} for item in parts) or
any(parts[:len(non_test_path)] == list(non_test_path) for non_test_path in source_file.dir_path_non_test)):
continue
name = path[offset+1:]
support_files[name].add(path)
elif source_file.name_is_reference:
ref_files[source_file.name].add(path)
else:
test_files[source_file.name].add(path)
errors = []
for name, colliding in iteritems(test_files):
if len(colliding) > 1:
if not _all_files_equal([os.path.join(repo_root, x) for x in colliding]):
# Only compute by_spec if there are prima-facie collisions because of cost
by_spec = defaultdict(set)
for path in colliding:
source_file = SourceFile(repo_root, path, "/")
for link in source_file.spec_links:
for r in (drafts_csswg_re, w3c_tr_re, w3c_dev_re):
m = r.match(link)
if m:
spec = m.group(1)
break
else:
continue
by_spec[spec].add(path)
for spec, paths in iteritems(by_spec):
if not _all_files_equal([os.path.join(repo_root, x) for x in paths]):
for x in paths:
errors.append(("CSS-COLLIDING-TEST-NAME",
"The filename %s in the %s testsuite is shared by: %s"
% (name,
spec,
", ".join(sorted(paths))),
x,
None))
for error_name, d in [("CSS-COLLIDING-REF-NAME", ref_files),
("CSS-COLLIDING-SUPPORT-NAME", support_files)]:
for name, colliding in iteritems(d):
if len(colliding) > 1:
if not _all_files_equal([os.path.join(repo_root, x) for x in colliding]):
for x in colliding:
errors.append((error_name,
"The filename %s is shared by: %s" % (name,
", ".join(sorted(colliding))),
x,
None))
return errors
def parse_whitelist(f):
"""
Parse the whitelist file given by `f`, and return the parsed structure.
"""
data = defaultdict(lambda:defaultdict(set))
ignored_files = set()
for line in f:
line = line.strip()
if not line or line.startswith("#"):
continue
parts = [item.strip() for item in line.split(":")]
if len(parts) == 2:
parts.append(None)
else:
parts[-1] = int(parts[-1])
error_types, file_match, line_number = parts
error_types = {item.strip() for item in error_types.split(",")}
file_match = os.path.normcase(file_match)
if "*" in error_types:
ignored_files.add(file_match)
else:
for error_type in error_types:
data[error_type][file_match].add(line_number)
return data, ignored_files
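# Illustrative whitelist lines (assumed format, matching the parser above):
# TRAILING WHITESPACE: example/file.html -> ignore that error anywhere in the file
# CR AT EOL, INDENT TABS: example/file.py: 17 -> ignore those errors on line 17 only
# *: vendored/* -> ignore every error in files under vendored/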
def filter_whitelist_errors(data, errors):
"""
Filter out those errors that are whitelisted in `data`.
"""
if not errors:
return []
whitelisted = [False for item in range(len(errors))]
for i, (error_type, msg, path, line) in enumerate(errors):
normpath = os.path.normcase(path)
if error_type in data:
wl_files = data[error_type]
for file_match, allowed_lines in iteritems(wl_files):
if None in allowed_lines or line in allowed_lines:
if fnmatch.fnmatchcase(normpath, file_match):
whitelisted[i] = True
return [item for i, item in enumerate(errors) if not whitelisted[i]]
class Regexp(object):
pattern = None
file_extensions = None
error = None
_re = None
def __init__(self):
self._re = re.compile(self.pattern)
def applies(self, path):
return (self.file_extensions is None or
os.path.splitext(path)[1] in self.file_extensions)
def search(self, line):
return self._re.search(line)
class TrailingWhitespaceRegexp(Regexp):
pattern = b"[ \t\f\v]$"
error = "TRAILING WHITESPACE"
description = "Whitespace at EOL"
class TabsRegexp(Regexp):
pattern = b"^\t"
error = "INDENT TABS"
description = "Tabs used for indentation"
class CRRegexp(Regexp):
pattern = b"\r$"
error = "CR AT EOL"
description = "CR character in line separator"
class SetTimeoutRegexp(Regexp):
pattern = b"setTimeout\s*\("
error = "SET TIMEOUT"
file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
description = "setTimeout used; step_timeout should typically be used instead"
class W3CTestOrgRegexp(Regexp):
pattern = b"w3c\-test\.org"
error = "W3C-TEST.ORG"
description = "External w3c-test.org domain used"
class Webidl2Regexp(Regexp):
pattern = b"webidl2\.js"
error = "WEBIDL2.JS"
description = "Legacy webidl2.js script used"
class ConsoleRegexp(Regexp):
pattern = b"console\.[a-zA-Z]+\s*\("
error = "CONSOLE"
file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
description = "Console logging API used"
class PrintRegexp(Regexp):
pattern = b"print(?:\s|\s*\()"
error = "PRINT STATEMENT"
file_extensions = [".py"]
description = "Print function used"
regexps = [item() for item in
[TrailingWhitespaceRegexp,
TabsRegexp,
CRRegexp,
SetTimeoutRegexp,
W3CTestOrgRegexp,
Webidl2Regexp,
ConsoleRegexp,
PrintRegexp]]
def check_regexp_line(repo_root, path, f, css_mode):
errors = []
applicable_regexps = [regexp for regexp in regexps if regexp.applies(path)]
for i, line in enumerate(f):
for regexp in applicable_regexps:
if regexp.search(line):
errors.append((regexp.error, regexp.description, path, i+1))
return errors
def check_parsed(repo_root, path, f, css_mode):
source_file = SourceFile(repo_root, path, "/", contents=f.read())
errors = []
if css_mode or path.startswith("css/"):
if (source_file.type == "support" and
not source_file.name_is_non_test and
not source_file.name_is_reference):
return [("SUPPORT-WRONG-DIR", "Support file not in support directory", path, None)]
if (source_file.type != "support" and
not source_file.name_is_reference and
not source_file.spec_links):
return [("MISSING-LINK", "Testcase file must have a link to a spec", path, None)]
if source_file.name_is_non_test or source_file.name_is_manual:
return []
if source_file.markup_type is None:
return []
if source_file.root is None:
return [("PARSE-FAILED", "Unable to parse file", path, None)]
if source_file.type == "manual" and not source_file.name_is_manual:
return [("CONTENT-MANUAL", "Manual test whose filename doesn't end in '-manual'", path, None)]
if source_file.type == "visual" and not source_file.name_is_visual:
return [("CONTENT-VISUAL", "Visual test whose filename doesn't end in '-visual'", path, None)]
for reftest_node in source_file.reftest_nodes:
href = reftest_node.attrib.get("href", "")
parts = urlsplit(href)
if parts.scheme or parts.netloc:
errors.append(("ABSOLUTE-URL-REF",
"Reference test with a reference file specified via an absolute URL: '%s'" % href, path, None))
continue
ref_url = urljoin(source_file.url, href)
ref_parts = urlsplit(ref_url)
if source_file.url == ref_url:
errors.append(("SAME-FILE-REF",
"Reference test which points at itself as a reference",
path,
None))
continue
assert ref_parts.path != ""
reference_file = os.path.join(repo_root, ref_parts.path[1:])
reference_rel = reftest_node.attrib.get("rel", "")
if not os.path.isfile(reference_file):
errors.append(("NON-EXISTENT-REF",
"Reference test with a non-existent '%s' relationship reference: '%s'" % (reference_rel, href), path, None))
if len(source_file.timeout_nodes) > 1:
errors.append(("MULTIPLE-TIMEOUT", "More than one meta name='timeout'", path, None))
for timeout_node in source_file.timeout_nodes:
timeout_value = timeout_node.attrib.get("content", "").lower()
if timeout_value != "long":
errors.append(("INVALID-TIMEOUT", "Invalid timeout value %s" % timeout_value, path, None))
if source_file.testharness_nodes:
if len(source_file.testharness_nodes) > 1:
errors.append(("MULTIPLE-TESTHARNESS",
"More than one <script src='/resources/testharness.js'>", path, None))
testharnessreport_nodes = source_file.root.findall(".//{http://www.w3.org/1999/xhtml}script[@src='/resources/testharnessreport.js']")
if not testharnessreport_nodes:
errors.append(("MISSING-TESTHARNESSREPORT",
"Missing <script src='/resources/testharnessreport.js'>", path, None))
else:
if len(testharnessreport_nodes) > 1:
errors.append(("MULTIPLE-TESTHARNESSREPORT",
"More than one <script src='/resources/testharnessreport.js'>", path, None))
testharnesscss_nodes = source_file.root.findall(".//{http://www.w3.org/1999/xhtml}link[@href='/resources/testharness.css']")
if testharnesscss_nodes:
errors.append(("PRESENT-TESTHARNESSCSS",
"Explicit link to testharness.css present", path, None))
for element in source_file.variant_nodes:
if "content" not in element.attrib:
errors.append(("VARIANT-MISSING",
"<meta name=variant> missing 'content' attribute", path, None))
else:
variant = element.attrib["content"]
if variant != "" and variant[0] not in ("?", "#"):
errors.append(("MALFORMED-VARIANT",
"%s <meta name=variant> 'content' attribute must be the empty string or start with '?' or '#'" % path, None))
seen_elements = {"timeout": False,
"testharness": False,
"testharnessreport": False}
required_elements = [key for key, value in {"testharness": True,
"testharnessreport": len(testharnessreport_nodes) > 0,
"timeout": len(source_file.timeout_nodes) > 0}.items()
if value]
for elem in source_file.root.iter():
if source_file.timeout_nodes and elem == source_file.timeout_nodes[0]:
seen_elements["timeout"] = True
if seen_elements["testharness"]:
errors.append(("LATE-TIMEOUT",
"<meta name=timeout> seen after testharness.js script", path, None))
elif elem == source_file.testharness_nodes[0]:
seen_elements["testharness"] = True
elif testharnessreport_nodes and elem == testharnessreport_nodes[0]:
seen_elements["testharnessreport"] = True
if not seen_elements["testharness"]:
errors.append(("EARLY-TESTHARNESSREPORT",
"testharnessreport.js script seen before testharness.js script", path, None))
if all(seen_elements[name] for name in required_elements):
break
for element in source_file.root.findall(".//{http://www.w3.org/1999/xhtml}script[@src]"):
src = element.attrib["src"]
for name in ["testharness", "testharnessreport"]:
if "%s.js" % name == src or ("/%s.js" % name in src and src != "/resources/%s.js" % name):
errors.append(("%s-PATH" % name.upper(), "%s.js script seen with incorrect path" % name, path, None))
return errors
class ASTCheck(object):
__metaclass__ = abc.ABCMeta
error = None
description = None
@abc.abstractmethod
def check(self, root):
pass
class OpenModeCheck(ASTCheck):
error = "OPEN-NO-MODE"
description = "File opened without providing an explicit mode (note: binary files must be read with 'b' in the mode flags)"
def check(self, root):
errors = []
for node in ast.walk(root):
if isinstance(node, ast.Call):
if hasattr(node.func, "id") and node.func.id in ("open", "file"):
if (len(node.args) < 2 and
all(item.arg != "mode" for item in node.keywords)):
errors.append(node.lineno)
return errors
ast_checkers = [item() for item in [OpenModeCheck]]
def check_python_ast(repo_root, path, f, css_mode):
if not path.endswith(".py"):
return []
try:
root = ast.parse(f.read())
except SyntaxError as e:
return [("PARSE-FAILED", "Unable to parse file", path, e.lineno)]
errors = []
for checker in ast_checkers:
for lineno in checker.check(root):
errors.append((checker.error, checker.description, path, lineno))
return errors
broken_js_metadata = re.compile(br"//\s*META:")
broken_python_metadata = re.compile(br"#\s*META:")
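# Note (added commentary): well-formed metadata comments look like
# "// META: timeout=long" in .any.js/.worker.js files and "# META: timeout=long"
# in .py files; the "broken" patterns above flag lines that mention META: but
# fail to match the strict js_meta_re/python_meta_re patterns.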
def check_script_metadata(repo_root, path, f, css_mode):
if path.endswith((".worker.js", ".any.js")):
meta_re = js_meta_re
broken_metadata = broken_js_metadata
elif path.endswith(".py"):
meta_re = python_meta_re
broken_metadata = broken_python_metadata
else:
return []
done = False
errors = []
for idx, line in enumerate(f):
assert isinstance(line, binary_type), line
m = meta_re.match(line)
if m:
key, value = m.groups()
if key == b"timeout":
if value != b"long":
errors.append(("UNKNOWN-TIMEOUT-METADATA", "Unexpected value for timeout metadata", path, idx + 1))
elif key == b"script":
pass
else:
errors.append(("UNKNOWN-METADATA", "Unexpected kind of metadata", path, idx + 1))
else:
done = True
if done:
if meta_re.match(line):
errors.append(("STRAY-METADATA", "Metadata comments should start the file", path, idx + 1))
elif meta_re.search(line):
errors.append(("INDENTED-METADATA", "Metadata comments should start the line", path, idx + 1))
elif broken_metadata.search(line):
errors.append(("BROKEN-METADATA", "Metadata comment is not formatted correctly", path, idx + 1))
return errors
def check_path(repo_root, path, css_mode):
"""
Runs lints that check the file path.
:param repo_root: the repository root
:param path: the path of the file within the repository
:param css_mode: whether we're in CSS testsuite mode
:returns: a list of errors found in ``path``
"""
errors = []
for path_fn in path_lints:
errors.extend(path_fn(repo_root, path, css_mode))
return errors
def check_all_paths(repo_root, paths, css_mode):
"""
Runs lints that check all paths globally.
:param repo_root: the repository root
:param paths: a list of all the paths within the repository
:param css_mode: whether we're in CSS testsuite mode
:returns: a list of errors found in ``paths``
"""
errors = []
for paths_fn in all_paths_lints:
errors.extend(paths_fn(repo_root, paths, css_mode))
return errors
def check_file_contents(repo_root, path, f, css_mode):
"""
Runs lints that check the file contents.
:param repo_root: the repository root
:param path: the path of the file within the repository
:param f: a file-like object with the file contents
:param css_mode: whether we're in CSS testsuite mode
:returns: a list of errors found in ``f``
"""
errors = []
for file_fn in file_lints:
errors.extend(file_fn(repo_root, path, f, css_mode))
f.seek(0)
return errors
def output_errors_text(errors):
for error_type, description, path, line_number in errors:
pos_string = path
if line_number:
pos_string += ":%s" % line_number
logger.error("%s: %s (%s)" % (pos_string, description, error_type))
def output_errors_markdown(errors):
if not errors:
return
heading = """Got lint errors:
| Error Type | Position | Message |
|------------|----------|---------|"""
for line in heading.split("\n"):
logger.error(line)
for error_type, description, path, line_number in errors:
pos_string = path
if line_number:
pos_string += ":%s" % line_number
logger.error("%s | %s | %s |" % (error_type, pos_string, description))
def output_errors_json(errors):
for error_type, error, path, line_number in errors:
print(json.dumps({"path": path, "lineno": line_number,
"rule": error_type, "message": error}))
def output_error_count(error_count):
if not error_count:
return
by_type = " ".join("%s: %d" % item for item in error_count.items())
count = sum(error_count.values())
logger.info("")
if count == 1:
logger.info("There was 1 error (%s)" % (by_type,))
else:
logger.info("There were %d errors (%s)" % (count, by_type))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("paths", nargs="*",
help="List of paths to lint")
parser.add_argument("--json", action="store_true",
help="Output machine-readable JSON format")
parser.add_argument("--markdown", action="store_true",
help="Output markdown")
parser.add_argument("--css-mode", action="store_true",
help="Run CSS testsuite specific lints")
return parser.parse_args()
def main(**kwargs):
if kwargs.get("json") and kwargs.get("markdown"):
logger.critical("Cannot specify --json and --markdown")
sys.exit(2)
output_format = {(True, False): "json",
(False, True): "markdown",
(False, False): "normal"}[(kwargs.get("json", False),
kwargs.get("markdown", False))]
paths = list(kwargs.get("paths") if kwargs.get("paths") else all_filesystem_paths(repo_root))
if output_format == "markdown":
setup_logging(True)
return lint(repo_root, paths, output_format, kwargs.get("css_mode", False))
def lint(repo_root, paths, output_format, css_mode):
error_count = defaultdict(int)
last = None
with open(os.path.join(repo_root, "lint.whitelist")) as f:
whitelist, ignored_files = parse_whitelist(f)
output_errors = {"json": output_errors_json,
"markdown": output_errors_markdown,
"normal": output_errors_text}[output_format]
def process_errors(errors):
"""
Filters and prints the errors, and updates the ``error_count`` object.
:param errors: a list of error tuples (error type, message, path, line number)
:returns: ``None`` if there were no errors, or
a tuple of the error type and the path otherwise
"""
errors = filter_whitelist_errors(whitelist, errors)
if not errors:
return None
output_errors(errors)
for error_type, error, path, line in errors:
error_count[error_type] += 1
return (errors[-1][0], path)
for path in paths[:]:
abs_path = os.path.join(repo_root, path)
if not os.path.exists(abs_path):
paths.remove(path)
continue
if any(fnmatch.fnmatch(path, file_match) for file_match in ignored_files):
paths.remove(path)
continue
errors = check_path(repo_root, path, css_mode)
last = process_errors(errors) or last
if not os.path.isdir(abs_path):
with open(abs_path, 'rb') as f:
errors = check_file_contents(repo_root, path, f, css_mode)
last = process_errors(errors) or last
errors = check_all_paths(repo_root, paths, css_mode)
last = process_errors(errors) or last
if output_format in ("normal", "markdown"):
output_error_count(error_count)
if error_count:
for line in (ERROR_MSG % (last[0], last[1], last[0], last[1])).split("\n"):
logger.info(line)
return sum(itervalues(error_count))
path_lints = [check_path_length, check_worker_collision]
all_paths_lints = [check_css_globally_unique]
file_lints = [check_regexp_line, check_parsed, check_python_ast, check_script_metadata]
if __name__ == "__main__":
args = parse_args()
error_count = main(**vars(args))
if error_count > 0:
sys.exit(1)
|
PetePriority/home-assistant
|
refs/heads/dev
|
homeassistant/components/sensor/fitbit.py
|
5
|
"""
Support for the Fitbit API.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.fitbit/
"""
import os
import logging
import datetime
import time
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.const import CONF_UNIT_SYSTEM
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.icon import icon_for_battery_level
import homeassistant.helpers.config_validation as cv
from homeassistant.util.json import load_json, save_json
REQUIREMENTS = ['fitbit==0.3.0']
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
ATTR_ACCESS_TOKEN = 'access_token'
ATTR_REFRESH_TOKEN = 'refresh_token'
ATTR_CLIENT_ID = 'client_id'
ATTR_CLIENT_SECRET = 'client_secret'
ATTR_LAST_SAVED_AT = 'last_saved_at'
CONF_MONITORED_RESOURCES = 'monitored_resources'
CONF_CLOCK_FORMAT = 'clock_format'
CONF_ATTRIBUTION = 'Data provided by Fitbit.com'
DEPENDENCIES = ['http']
FITBIT_AUTH_CALLBACK_PATH = '/api/fitbit/callback'
FITBIT_AUTH_START = '/api/fitbit'
FITBIT_CONFIG_FILE = 'fitbit.conf'
FITBIT_DEFAULT_RESOURCES = ['activities/steps']
SCAN_INTERVAL = datetime.timedelta(minutes=30)
DEFAULT_CONFIG = {
'client_id': 'CLIENT_ID_HERE',
'client_secret': 'CLIENT_SECRET_HERE'
}
FITBIT_RESOURCES_LIST = {
'activities/activityCalories': ['Activity Calories', 'cal', 'fire'],
'activities/calories': ['Calories', 'cal', 'fire'],
'activities/caloriesBMR': ['Calories BMR', 'cal', 'fire'],
'activities/distance': ['Distance', '', 'map-marker'],
'activities/elevation': ['Elevation', '', 'walk'],
'activities/floors': ['Floors', 'floors', 'walk'],
'activities/heart': ['Resting Heart Rate', 'bpm', 'heart-pulse'],
'activities/minutesFairlyActive':
['Minutes Fairly Active', 'minutes', 'walk'],
'activities/minutesLightlyActive':
['Minutes Lightly Active', 'minutes', 'walk'],
'activities/minutesSedentary':
['Minutes Sedentary', 'minutes', 'seat-recline-normal'],
'activities/minutesVeryActive': ['Minutes Very Active', 'minutes', 'run'],
'activities/steps': ['Steps', 'steps', 'walk'],
'activities/tracker/activityCalories':
['Tracker Activity Calories', 'cal', 'fire'],
'activities/tracker/calories': ['Tracker Calories', 'cal', 'fire'],
'activities/tracker/distance': ['Tracker Distance', '', 'map-marker'],
'activities/tracker/elevation': ['Tracker Elevation', '', 'walk'],
'activities/tracker/floors': ['Tracker Floors', 'floors', 'walk'],
'activities/tracker/minutesFairlyActive':
['Tracker Minutes Fairly Active', 'minutes', 'walk'],
'activities/tracker/minutesLightlyActive':
['Tracker Minutes Lightly Active', 'minutes', 'walk'],
'activities/tracker/minutesSedentary':
['Tracker Minutes Sedentary', 'minutes', 'seat-recline-normal'],
'activities/tracker/minutesVeryActive':
['Tracker Minutes Very Active', 'minutes', 'run'],
'activities/tracker/steps': ['Tracker Steps', 'steps', 'walk'],
'body/bmi': ['BMI', 'BMI', 'human'],
'body/fat': ['Body Fat', '%', 'human'],
'body/weight': ['Weight', '', 'human'],
'devices/battery': ['Battery', None, None],
'sleep/awakeningsCount':
['Awakenings Count', 'times awaken', 'sleep'],
'sleep/efficiency': ['Sleep Efficiency', '%', 'sleep'],
'sleep/minutesAfterWakeup': ['Minutes After Wakeup', 'minutes', 'sleep'],
'sleep/minutesAsleep': ['Sleep Minutes Asleep', 'minutes', 'sleep'],
'sleep/minutesAwake': ['Sleep Minutes Awake', 'minutes', 'sleep'],
'sleep/minutesToFallAsleep':
['Sleep Minutes to Fall Asleep', 'minutes', 'sleep'],
'sleep/startTime': ['Sleep Start Time', None, 'clock'],
'sleep/timeInBed': ['Sleep Time in Bed', 'minutes', 'hotel']
}
FITBIT_MEASUREMENTS = {
'en_US': {
'duration': 'ms',
'distance': 'mi',
'elevation': 'ft',
'height': 'in',
'weight': 'lbs',
'body': 'in',
'liquids': 'fl. oz.',
'blood glucose': 'mg/dL',
'battery': '',
},
'en_GB': {
'duration': 'milliseconds',
'distance': 'kilometers',
'elevation': 'meters',
'height': 'centimeters',
'weight': 'stone',
'body': 'centimeters',
'liquids': 'milliliters',
'blood glucose': 'mmol/L',
'battery': '',
},
'metric': {
'duration': 'milliseconds',
'distance': 'kilometers',
'elevation': 'meters',
'height': 'centimeters',
'weight': 'kilograms',
'body': 'centimeters',
'liquids': 'milliliters',
'blood glucose': 'mmol/L',
'battery': '',
}
}
BATTERY_LEVELS = {
'High': 100,
'Medium': 50,
'Low': 20,
'Empty': 0
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_MONITORED_RESOURCES, default=FITBIT_DEFAULT_RESOURCES):
vol.All(cv.ensure_list, [vol.In(FITBIT_RESOURCES_LIST)]),
vol.Optional(CONF_CLOCK_FORMAT, default='24H'):
vol.In(['12H', '24H']),
vol.Optional(CONF_UNIT_SYSTEM, default='default'):
vol.In(['en_GB', 'en_US', 'metric', 'default'])
})
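# A minimal example configuration accepted by the schema above (hypothetical
# values; any resource key from FITBIT_RESOURCES_LIST is valid):
#
# sensor:
#   - platform: fitbit
#     monitored_resources:
#       - activities/steps
#       - sleep/timeInBed
#     clock_format: '12H'
#     unit_system: metric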
def request_app_setup(hass, config, add_entities, config_path,
discovery_info=None):
"""Assist user with configuring the Fitbit dev application."""
configurator = hass.components.configurator
def fitbit_configuration_callback(callback_data):
"""Handle configuration updates."""
config_path = hass.config.path(FITBIT_CONFIG_FILE)
if os.path.isfile(config_path):
config_file = load_json(config_path)
if config_file == DEFAULT_CONFIG:
error_msg = ("You didn't correctly modify fitbit.conf",
" please try again")
configurator.notify_errors(_CONFIGURING['fitbit'],
error_msg)
else:
setup_platform(hass, config, add_entities, discovery_info)
else:
setup_platform(hass, config, add_entities, discovery_info)
start_url = "{}{}".format(hass.config.api.base_url,
FITBIT_AUTH_CALLBACK_PATH)
description = """Please create a Fitbit developer app at
https://dev.fitbit.com/apps/new.
For the OAuth 2.0 Application Type choose Personal.
Set the Callback URL to {}.
They will provide you with a Client ID and secret.
These need to be saved into the file located at: {}.
Then come back here and hit the button below.
""".format(start_url, config_path)
submit = "I have saved my Client ID and Client Secret into fitbit.conf."
_CONFIGURING['fitbit'] = configurator.request_config(
'Fitbit', fitbit_configuration_callback,
description=description, submit_caption=submit,
description_image="/static/images/config_fitbit_app.png"
)
def request_oauth_completion(hass):
"""Request user complete Fitbit OAuth2 flow."""
configurator = hass.components.configurator
if "fitbit" in _CONFIGURING:
configurator.notify_errors(
_CONFIGURING['fitbit'], "Failed to register, please try again.")
return
def fitbit_configuration_callback(callback_data):
"""Handle configuration updates."""
start_url = '{}{}'.format(hass.config.api.base_url, FITBIT_AUTH_START)
description = "Please authorize Fitbit by visiting {}".format(start_url)
_CONFIGURING['fitbit'] = configurator.request_config(
'Fitbit', fitbit_configuration_callback,
description=description,
submit_caption="I have authorized Fitbit."
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Fitbit sensor."""
config_path = hass.config.path(FITBIT_CONFIG_FILE)
if os.path.isfile(config_path):
config_file = load_json(config_path)
if config_file == DEFAULT_CONFIG:
request_app_setup(
hass, config, add_entities, config_path, discovery_info=None)
return False
else:
save_json(config_path, DEFAULT_CONFIG)
request_app_setup(
hass, config, add_entities, config_path, discovery_info=None)
return False
if "fitbit" in _CONFIGURING:
hass.components.configurator.request_done(_CONFIGURING.pop("fitbit"))
import fitbit
access_token = config_file.get(ATTR_ACCESS_TOKEN)
refresh_token = config_file.get(ATTR_REFRESH_TOKEN)
expires_at = config_file.get(ATTR_LAST_SAVED_AT)
if None not in (access_token, refresh_token):
authd_client = fitbit.Fitbit(config_file.get(ATTR_CLIENT_ID),
config_file.get(ATTR_CLIENT_SECRET),
access_token=access_token,
refresh_token=refresh_token,
expires_at=expires_at,
refresh_cb=lambda x: None)
if int(time.time()) - expires_at > 3600:
authd_client.client.refresh_token()
unit_system = config.get(CONF_UNIT_SYSTEM)
if unit_system == 'default':
authd_client.system = authd_client. \
user_profile_get()["user"]["locale"]
if authd_client.system != 'en_GB':
if hass.config.units.is_metric:
authd_client.system = 'metric'
else:
authd_client.system = 'en_US'
else:
authd_client.system = unit_system
dev = []
registered_devs = authd_client.get_devices()
clock_format = config.get(CONF_CLOCK_FORMAT)
for resource in config.get(CONF_MONITORED_RESOURCES):
# monitor battery for all linked FitBit devices
if resource == 'devices/battery':
for dev_extra in registered_devs:
dev.append(FitbitSensor(
authd_client, config_path, resource,
hass.config.units.is_metric, clock_format, dev_extra))
else:
dev.append(FitbitSensor(
authd_client, config_path, resource,
hass.config.units.is_metric, clock_format))
add_entities(dev, True)
else:
oauth = fitbit.api.FitbitOauth2Client(
config_file.get(ATTR_CLIENT_ID),
config_file.get(ATTR_CLIENT_SECRET))
redirect_uri = '{}{}'.format(hass.config.api.base_url,
FITBIT_AUTH_CALLBACK_PATH)
fitbit_auth_start_url, _ = oauth.authorize_token_url(
redirect_uri=redirect_uri,
scope=['activity', 'heartrate', 'nutrition', 'profile',
'settings', 'sleep', 'weight'])
hass.http.register_redirect(FITBIT_AUTH_START, fitbit_auth_start_url)
hass.http.register_view(FitbitAuthCallbackView(
config, add_entities, oauth))
request_oauth_completion(hass)
class FitbitAuthCallbackView(HomeAssistantView):
"""Handle OAuth finish callback requests."""
requires_auth = False
url = FITBIT_AUTH_CALLBACK_PATH
name = 'api:fitbit:callback'
def __init__(self, config, add_entities, oauth):
"""Initialize the OAuth callback view."""
self.config = config
self.add_entities = add_entities
self.oauth = oauth
@callback
def get(self, request):
"""Finish OAuth callback request."""
from oauthlib.oauth2.rfc6749.errors import MismatchingStateError
from oauthlib.oauth2.rfc6749.errors import MissingTokenError
hass = request.app['hass']
data = request.query
response_message = """Fitbit has been successfully authorized!
You can close this window now!"""
result = None
if data.get('code') is not None:
redirect_uri = '{}{}'.format(
hass.config.api.base_url, FITBIT_AUTH_CALLBACK_PATH)
try:
result = self.oauth.fetch_access_token(data.get('code'),
redirect_uri)
except MissingTokenError as error:
_LOGGER.error("Missing token: %s", error)
response_message = """Something went wrong when
attempting to authenticate with Fitbit. The error
encountered was {}. Please try again!""".format(error)
except MismatchingStateError as error:
_LOGGER.error("Mismatched state, CSRF error: %s", error)
response_message = """Something went wrong when
attempting to authenticate with Fitbit. The error
encountered was {}. Please try again!""".format(error)
else:
_LOGGER.error("Unknown error when authing")
response_message = """Something went wrong when
attempting to authenticate with Fitbit.
An unknown error occurred. Please try again!
"""
if result is None:
_LOGGER.error("Unknown error when authing")
response_message = """Something went wrong when
attempting to authenticate with Fitbit.
An unknown error occurred. Please try again!
"""
html_response = """<html><head><title>Fitbit Auth</title></head>
<body><h1>{}</h1></body></html>""".format(response_message)
if result:
config_contents = {
ATTR_ACCESS_TOKEN: result.get('access_token'),
ATTR_REFRESH_TOKEN: result.get('refresh_token'),
ATTR_CLIENT_ID: self.oauth.client_id,
ATTR_CLIENT_SECRET: self.oauth.client_secret,
ATTR_LAST_SAVED_AT: int(time.time())
}
save_json(hass.config.path(FITBIT_CONFIG_FILE), config_contents)
hass.async_add_job(setup_platform, hass, self.config,
self.add_entities)
return html_response
class FitbitSensor(Entity):
"""Implementation of a Fitbit sensor."""
def __init__(self, client, config_path, resource_type,
is_metric, clock_format, extra=None):
"""Initialize the Fitbit sensor."""
self.client = client
self.config_path = config_path
self.resource_type = resource_type
self.is_metric = is_metric
self.clock_format = clock_format
self.extra = extra
self._name = FITBIT_RESOURCES_LIST[self.resource_type][0]
if self.extra:
self._name = '{0} Battery'.format(self.extra.get('deviceVersion'))
unit_type = FITBIT_RESOURCES_LIST[self.resource_type][1]
if unit_type == "":
split_resource = self.resource_type.split('/')
try:
measurement_system = FITBIT_MEASUREMENTS[self.client.system]
except KeyError:
if self.is_metric:
measurement_system = FITBIT_MEASUREMENTS['metric']
else:
measurement_system = FITBIT_MEASUREMENTS['en_US']
unit_type = measurement_system[split_resource[-1]]
self._unit_of_measurement = unit_type
self._state = 0
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def icon(self):
"""Icon to use in the frontend, if any."""
if self.resource_type == 'devices/battery' and self.extra:
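            # BATTERY_LEVELS maps Fitbit's textual battery level (e.g. 'High',
            # 'Medium', 'Low') to a numeric percentage for the icon helper.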
battery_level = BATTERY_LEVELS[self.extra.get('battery')]
return icon_for_battery_level(battery_level=battery_level,
charging=None)
return 'mdi:{}'.format(FITBIT_RESOURCES_LIST[self.resource_type][2])
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {}
attrs[ATTR_ATTRIBUTION] = CONF_ATTRIBUTION
if self.extra:
attrs['model'] = self.extra.get('deviceVersion')
attrs['type'] = self.extra.get('type').lower()
return attrs
def update(self):
"""Get the latest data from the Fitbit API and update the states."""
if self.resource_type == 'devices/battery' and self.extra:
self._state = self.extra.get('battery')
else:
container = self.resource_type.replace("/", "-")
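            # The Fitbit time-series API keys the response with '/' replaced
            # by '-', e.g. 'activities/steps' -> response['activities-steps'].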
response = self.client.time_series(self.resource_type, period='7d')
raw_state = response[container][-1].get('value')
if self.resource_type == 'activities/distance':
self._state = format(float(raw_state), '.2f')
elif self.resource_type == 'activities/tracker/distance':
self._state = format(float(raw_state), '.2f')
elif self.resource_type == 'body/bmi':
self._state = format(float(raw_state), '.1f')
elif self.resource_type == 'body/fat':
self._state = format(float(raw_state), '.1f')
elif self.resource_type == 'body/weight':
self._state = format(float(raw_state), '.1f')
elif self.resource_type == 'sleep/startTime':
if raw_state == '':
self._state = '-'
elif self.clock_format == '12H':
hours, minutes = raw_state.split(':')
hours, minutes = int(hours), int(minutes)
setting = 'AM'
                    # Hours 12-23 are PM; only 13-23 need the 12-hour shift.
                    if hours >= 12:
                        setting = 'PM'
                        if hours > 12:
                            hours -= 12
                    elif hours == 0:
                        hours = 12
self._state = '{}:{:02d} {}'.format(hours, minutes,
setting)
else:
self._state = raw_state
else:
if self.is_metric:
self._state = raw_state
else:
try:
self._state = '{0:,}'.format(int(raw_state))
except TypeError:
self._state = raw_state
if self.resource_type == 'activities/heart':
                self._state = (response[container][-1]
                               .get('value').get('restingHeartRate'))
token = self.client.client.session.token
config_contents = {
ATTR_ACCESS_TOKEN: token.get('access_token'),
ATTR_REFRESH_TOKEN: token.get('refresh_token'),
ATTR_CLIENT_ID: self.client.client.client_id,
ATTR_CLIENT_SECRET: self.client.client.client_secret,
ATTR_LAST_SAVED_AT: int(time.time())
}
save_json(self.config_path, config_contents)
|
maurerpe/FreeCAD
|
refs/heads/master
|
src/Tools/generateTemplates/templateCMakeFile.py
|
12133432
| |
julienaubert/clothstream
|
refs/heads/master
|
clothstream/demo/management/commands/__init__.py
|
12133432
| |
gratipay/gratipay.com
|
refs/heads/master
|
gratipay/utils/ghost.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
PACKAGE_JSON = '''\
{
"name": "ghost",
"version": "1.0.0-alpha.21",
"description": "Just a blogging platform.",
"author": "Ghost Foundation",
"homepage": "http://ghost.org",
"keywords": [
"ghost",
"blog",
"cms"
],
"repository": {
"type": "git",
"url": "git://github.com/TryGhost/Ghost.git"
},
"bugs": "https://github.com/TryGhost/Ghost/issues",
"contributors": "https://github.com/TryGhost/Ghost/graphs/contributors",
"license": "MIT",
"main": "./core/index",
"scripts": {
"start": "node index",
"test": "grunt validate --verbose",
"init": "yarn global add knex-migrator ember-cli grunt-cli && yarn install && grunt symlink && grunt init || true"
},
"engines": {
"node": "^4.5.0 || ^6.9.0"
},
"dependencies": {
"amperize": "0.3.4",
"archiver": "1.3.0",
"bcryptjs": "2.4.3",
"bluebird": "3.5.0",
"body-parser": "1.17.1",
"bookshelf": "0.10.3",
"brute-knex": "https://github.com/cobbspur/brute-knex/tarball/37439f56965b17d29bb4ff9b3f3222b2f4bd6ce3",
"bson-objectid": "1.1.5",
"chalk": "1.1.3",
"cheerio": "0.22.0",
"compression": "1.6.2",
"connect-slashes": "1.3.1",
"cookie-session": "1.2.0",
"cors": "2.8.3",
"csv-parser": "1.11.0",
"debug": "2.6.6",
"downsize": "0.0.8",
"express": "4.15.2",
"express-brute": "1.0.1",
"express-hbs": "1.0.4",
"extract-zip-fork": "1.5.1",
"fs-extra": "3.0.1",
"ghost-gql": "0.0.6",
"ghost-ignition": "2.8.11",
"ghost-storage-base": "0.0.1",
"glob": "5.0.15",
"gscan": "1.1.0",
"html-to-text": "3.2.0",
"icojs": "0.7.2",
"image-size": "0.5.2",
"intl": "1.2.5",
"intl-messageformat": "1.3.0",
"jsdom": "9.12.0",
"jsonpath": "0.2.11",
"knex": "0.12.9",
"knex-migrator": "2.0.16",
"lodash": "4.17.4",
"markdown-it": "8.3.1",
"markdown-it-footnote": "3.0.1",
"markdown-it-lazy-headers": "0.1.3",
"markdown-it-mark": "2.0.0",
"markdown-it-named-headers": "0.0.4",
"mobiledoc-dom-renderer": "0.6.5",
"moment": "2.18.1",
"moment-timezone": "0.5.13",
"multer": "1.3.0",
"mysql": "2.13.0",
"nconf": "0.8.4",
"netjet": "1.1.3",
"nodemailer": "0.7.1",
"oauth2orize": "1.8.0",
"passport": "0.3.2",
"passport-ghost": "2.3.1",
"passport-http-bearer": "1.0.1",
"passport-oauth2-client-password": "0.1.2",
"path-match": "1.2.4",
"rss": "1.2.2",
"sanitize-html": "1.14.1",
"semver": "5.3.0",
"simple-dom": "0.3.2",
"simple-html-tokenizer": "0.4.1",
"superagent": "3.5.2",
"unidecode": "0.1.8",
"uuid": "3.0.1",
"validator": "6.3.0",
"xml": "1.0.1"
},
"optionalDependencies": {
"sqlite3": "3.1.8"
},
"devDependencies": {
"grunt": "1.0.1",
"grunt-bg-shell": "2.3.3",
"grunt-cli": "1.2.0",
"grunt-contrib-clean": "1.0.0",
"grunt-contrib-compress": "1.3.0",
"grunt-contrib-copy": "1.0.0",
"grunt-contrib-jshint": "1.0.0",
"grunt-contrib-symlink": "^1.0.0",
"grunt-contrib-uglify": "2.0.0",
"grunt-contrib-watch": "1.0.0",
"grunt-cssnano": "2.1.0",
"grunt-docker": "0.0.11",
"grunt-express-server": "0.5.3",
"grunt-jscs": "3.0.1",
"grunt-mocha-cli": "2.1.0",
"grunt-mocha-istanbul": "5.0.2",
"grunt-shell": "1.3.1",
"grunt-subgrunt": "1.2.0",
"grunt-update-submodules": "0.4.1",
"istanbul": "0.4.5",
"jshint": "2.9.4",
"jshint-stylish": "2.2.1",
"matchdep": "1.0.1",
"minimist": "1.2.0",
"mocha": "3.3.0",
"nock": "9.0.13",
"rewire": "2.5.2",
"run-sequence": "1.2.2",
"should": "11.2.1",
"should-http": "0.1.1",
"sinon": "1.17.7",
"supertest": "3.0.0",
"tmp": "0.0.31"
},
"greenkeeper": {
"ignore": [
"glob",
"nodemailer",
"grunt",
"grunt-bg-shell",
"grunt-cli",
"grunt-contrib-clean",
"grunt-contrib-compress",
"grunt-contrib-copy",
"grunt-contrib-jshint",
"grunt-contrib-uglify",
"grunt-contrib-watch",
"grunt-docker",
"grunt-express-server",
"grunt-jscs",
"grunt-mocha-cli",
"grunt-mocha-istanbul",
"grunt-shell",
"grunt-subgrunt",
"grunt-update-submodules",
"sinon"
]
}
}'''
|
hellwen/mytrade
|
refs/heads/master
|
mytrade/setup/views.py
|
1
|
# -*- coding: utf-8 -*-
from flask import (
Blueprint,
request,
flash,
url_for,
redirect,
)
from flask_login import login_required
from .models import (
Unit,
ItemGroup,
Company,
)
from .forms import (
UnitForm,
ItemGroupForm,
CompanyForm
)
from mytrade.extensions import (
db
)
from mytrade.utils import (
render,
_,
)
blueprint = Blueprint("setup", __name__, url_prefix='/setup',
static_folder="../static",
template_folder="../templates/setup")
@blueprint.route('/units', methods=['GET'])
@login_required
def units():
"""Unit Query"""
return_url = request.args.get('next', url_for(".units"))
models = Unit.query.all()
return render('units.html', models=models, return_url=return_url)
@blueprint.route('/unit/create', methods=['POST', 'GET'])
@login_required
def unit_create():
"""Unit Create"""
return_url = request.args.get('next', url_for(".units"))
model = Unit('')
form = UnitForm(next=return_url)
if form.validate_on_submit():
try:
form.populate_obj(model)
db.session.add(model)
db.session.commit()
if '_add_another' in request.form:
flash(_('Created successfully.'))
return redirect(url_for('.unit_create', url=return_url))
else:
return redirect(return_url)
except Exception, ex:
db.session.rollback()
            flash(_('Failed to create model.') + ' %(error)s' % {'error': str(ex)}, 'danger')
return render('unit_create.html', form=form,
return_url=return_url)
@blueprint.route('/unit/edit/id=<int:id>', methods=['GET', 'POST'])
@login_required
def unit_edit(id):
"""Unit Edit"""
return_url = request.args.get('next', url_for(".units"))
model = Unit.query.get(id)
if model is None:
return redirect(return_url)
form = UnitForm(obj=model, next=return_url)
if form.validate_on_submit():
try:
form.populate_obj(model)
db.session.commit()
return redirect(return_url)
except Exception:
db.session.rollback()
flash(_('Failed to update model.'))
return render('unit_edit.html', form=form,
return_url=return_url)
@blueprint.route('/unit/delete/id=<int:id>', methods=['POST'])
@login_required
def unit_delete(id):
"""Unit Delete"""
return_url = request.args.get('next', url_for(".units"))
model = Unit.query.filter_by(id=id).first()
if model:
model.delete()
return redirect(return_url)
@blueprint.route('/item_groups', methods=['GET'])
@login_required
def item_groups():
"""Item Group Query"""
return_url = request.args.get('next', url_for(".item_groups"))
models = ItemGroup.query.all()
return render('item_groups.html', models=models, return_url=return_url)
@blueprint.route('/item_group/create', methods=['POST', 'GET'])
@login_required
def item_group_create():
"""Item Group Create"""
return_url = request.args.get('next', url_for(".item_groups"))
model = ItemGroup('')
form = ItemGroupForm(next=return_url)
    form.parent_id.choices = [(r.id, r.item_group_name)
                              for r in ItemGroup.query.order_by('item_group_name')]
if form.validate_on_submit():
try:
form.populate_obj(model)
db.session.add(model)
db.session.commit()
if '_add_another' in request.form:
flash(_('Created successfully.'))
return redirect(url_for('.item_group_create', url=return_url))
else:
return redirect(return_url)
except Exception, ex:
db.session.rollback()
            flash(_('Failed to create model.') + ' %(error)s' % {'error': str(ex)}, 'danger')
return render('item_group_create.html', form=form,
return_url=return_url)
@blueprint.route('/item_group/edit/id=<int:id>', methods=['GET', 'POST'])
@login_required
def item_group_edit(id):
"""Item Group Edit"""
return_url = request.args.get('next', url_for(".item_groups"))
model = ItemGroup.query.get(id)
if model is None:
return redirect(return_url)
form = ItemGroupForm(obj=model, next=return_url)
    form.parent_id.choices = [(r.id, r.item_group_name)
                              for r in ItemGroup.query.order_by('item_group_name')]
if form.validate_on_submit():
try:
form.populate_obj(model)
db.session.commit()
return redirect(return_url)
except Exception:
db.session.rollback()
flash(_('Failed to update model.'))
return render('item_group_edit.html', form=form,
return_url=return_url)
@blueprint.route('/item_group/delete/id=<int:id>', methods=['POST'])
@login_required
def item_group_delete(id):
"""Item Group Delete"""
return_url = request.args.get('next', url_for(".item_groups"))
model = ItemGroup.query.get(id)
if model:
model.delete()
return redirect(return_url)
@blueprint.route('/companies', methods=['GET'])
@login_required
def companies():
"""Companies Query"""
return_url = request.args.get('next', url_for(".companies"))
models = Company.query.all()
return render('companies.html', models=models, return_url=return_url)
@blueprint.route('/company/create', methods=['POST', 'GET'])
@login_required
def company_create():
"""Company Create"""
return_url = request.args.get('next', url_for(".companies"))
model = Company('')
form = CompanyForm(next=return_url)
if form.validate_on_submit():
try:
form.populate_obj(model)
db.session.add(model)
db.session.commit()
if '_add_another' in request.form:
flash(_('Created successfully.'))
return redirect(url_for('.company_create', url=return_url))
else:
return redirect(return_url)
except Exception, ex:
db.session.rollback()
            flash(_('Failed to create model.') + ' %(error)s' % {'error': str(ex)}, 'danger')
return render('company_create.html', form=form,
return_url=return_url)
@blueprint.route('/company/edit/id=<int:id>', methods=['GET', 'POST'])
@login_required
def company_edit(id):
"""Company Edit"""
return_url = request.args.get('next', url_for(".companies"))
model = Company.query.get(id)
if model is None:
return redirect(return_url)
form = CompanyForm(obj=model, next=return_url)
if form.validate_on_submit():
try:
form.populate_obj(model)
db.session.commit()
return redirect(return_url)
except Exception:
db.session.rollback()
flash(_('Failed to update model.'))
return render('company_edit.html', form=form,
return_url=return_url)
@blueprint.route('/company/delete/id=<int:id>', methods=['POST'])
@login_required
def company_delete(id):
"""Company Delete"""
return_url = request.args.get('next', url_for(".companies"))
model = Company.query.get(id)
if model:
model.delete()
return redirect(return_url)
|
Phil-LiDAR2-Geonode/pl2-geonode
|
refs/heads/master
|
geonode/layers/admin.py
|
17
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.contrib import admin
from geonode.base.admin import MediaTranslationAdmin, ResourceBaseAdminForm
from geonode.layers.models import Layer, Attribute, Style
from geonode.layers.models import LayerFile, UploadSession
class AttributeInline(admin.TabularInline):
model = Attribute
class LayerAdminForm(ResourceBaseAdminForm):
class Meta:
model = Layer
class LayerAdmin(MediaTranslationAdmin):
list_display = (
'id',
'typename',
'service_type',
'title',
'date',
'category')
list_display_links = ('id',)
list_editable = ('title', 'category')
list_filter = ('storeType', 'owner', 'category',
'restriction_code_type__identifier', 'date', 'date_type')
search_fields = ('typename', 'title', 'abstract', 'purpose',)
filter_horizontal = ('contacts',)
date_hierarchy = 'date'
readonly_fields = ('uuid', 'typename', 'workspace')
inlines = [AttributeInline]
form = LayerAdminForm
class AttributeAdmin(admin.ModelAdmin):
model = Attribute
list_display_links = ('id',)
list_display = (
'id',
'layer',
'attribute',
'description',
'attribute_label',
'attribute_type',
'display_order')
list_filter = ('layer', 'attribute_type')
search_fields = ('attribute', 'attribute_label',)
class StyleAdmin(admin.ModelAdmin):
model = Style
list_display_links = ('sld_title',)
list_display = ('id', 'name', 'sld_title', 'workspace', 'sld_url')
list_filter = ('workspace',)
search_fields = ('name', 'workspace',)
class LayerFileInline(admin.TabularInline):
model = LayerFile
class UploadSessionAdmin(admin.ModelAdmin):
model = UploadSession
list_display = ('date', 'user', 'processed')
inlines = [LayerFileInline]
admin.site.register(Layer, LayerAdmin)
admin.site.register(Attribute, AttributeAdmin)
admin.site.register(Style, StyleAdmin)
admin.site.register(UploadSession, UploadSessionAdmin)
|
ragupta-git/ImcSdk
|
refs/heads/master
|
imcsdk/mometa/bios/BiosVfMemoryMappedIOAbove4GB.py
|
1
|
"""This module contains the general information for BiosVfMemoryMappedIOAbove4GB ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class BiosVfMemoryMappedIOAbove4GBConsts:
VP_MEMORY_MAPPED_IOABOVE4_GB_DISABLED = "Disabled"
VP_MEMORY_MAPPED_IOABOVE4_GB_ENABLED = "Enabled"
_VP_MEMORY_MAPPED_IOABOVE4_GB_DISABLED = "disabled"
_VP_MEMORY_MAPPED_IOABOVE4_GB_ENABLED = "enabled"
VP_MEMORY_MAPPED_IOABOVE4_GB_PLATFORM_DEFAULT = "platform-default"
class BiosVfMemoryMappedIOAbove4GB(ManagedObject):
"""This is BiosVfMemoryMappedIOAbove4GB class."""
consts = BiosVfMemoryMappedIOAbove4GBConsts()
naming_props = set([])
mo_meta = {
"classic": MoMeta("BiosVfMemoryMappedIOAbove4GB", "biosVfMemoryMappedIOAbove4GB", "Memory-mapped-IO-above-4GB", VersionMeta.Version151f, "InputOutput", 0x1f, [], ["admin", "read-only", "user"], [u'biosPlatformDefaults', u'biosSettings'], [], ["Get", "Set"]),
"modular": MoMeta("BiosVfMemoryMappedIOAbove4GB", "biosVfMemoryMappedIOAbove4GB", "Memory-mapped-IO-above-4GB", VersionMeta.Version2013e, "InputOutput", 0x1f, [], ["admin", "read-only", "user"], [u'biosPlatformDefaults', u'biosSettings'], [], ["Get", "Set"])
}
prop_meta = {
"classic": {
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"vp_memory_mapped_io_above4_gb": MoPropertyMeta("vp_memory_mapped_io_above4_gb", "vpMemoryMappedIOAbove4GB", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["Disabled", "Enabled", "disabled", "enabled", "platform-default"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
},
"modular": {
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"vp_memory_mapped_io_above4_gb": MoPropertyMeta("vp_memory_mapped_io_above4_gb", "vpMemoryMappedIOAbove4GB", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["Disabled", "Enabled", "disabled", "enabled", "platform-default"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
},
}
prop_map = {
"classic": {
"dn": "dn",
"rn": "rn",
"status": "status",
"vpMemoryMappedIOAbove4GB": "vp_memory_mapped_io_above4_gb",
"childAction": "child_action",
},
"modular": {
"dn": "dn",
"rn": "rn",
"status": "status",
"vpMemoryMappedIOAbove4GB": "vp_memory_mapped_io_above4_gb",
"childAction": "child_action",
},
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.status = None
self.vp_memory_mapped_io_above4_gb = None
self.child_action = None
ManagedObject.__init__(self, "BiosVfMemoryMappedIOAbove4GB", parent_mo_or_dn, **kwargs)
|
x-kalux/bitcoin_WiG-B
|
refs/heads/W0.1
|
test/functional/mempool_resurrect_test.py
|
35
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test resurrection of mined transactions when the blockchain is re-organized."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
self.setup_clean_chain = False
# Just need one node for this test
self.extra_args = [["-checkmempool"]]
def run_test(self):
node0_address = self.nodes[0].getnewaddress()
# Spend block 1/2/3's coinbase transactions
# Mine a block.
# Create three more transactions, spending the spends
# Mine another block.
# ... make sure all the transactions are confirmed
# Invalidate both blocks
# ... make sure all the transactions are put back in the mempool
# Mine a new block
# ... make sure all the transactions are confirmed again.
b = [ self.nodes[0].getblockhash(n) for n in range(1, 4) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spends1_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.99) for txid in coinbase_txids ]
spends1_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends1_raw ]
blocks = []
blocks.extend(self.nodes[0].generate(1))
spends2_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.98) for txid in spends1_id ]
spends2_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends2_raw ]
blocks.extend(self.nodes[0].generate(1))
# mempool should be empty, all txns confirmed
assert_equal(set(self.nodes[0].getrawmempool()), set())
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] > 0)
# Use invalidateblock to re-org back; all transactions should
# end up unconfirmed and back in the mempool
for node in self.nodes:
node.invalidateblock(blocks[0])
        # mempool should contain all the txns again, now unconfirmed
assert_equal(set(self.nodes[0].getrawmempool()), set(spends1_id+spends2_id))
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] == 0)
# Generate another block, they should all get mined
self.nodes[0].generate(1)
# mempool should be empty, all txns confirmed
assert_equal(set(self.nodes[0].getrawmempool()), set())
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] > 0)
if __name__ == '__main__':
MempoolCoinbaseTest().main()
|
Russell-IO/ansible
|
refs/heads/devel
|
lib/ansible/plugins/action/junos_config.py
|
29
|
#
# (c) 2017, Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.junos import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.module_utils._text import to_bytes
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._task.args.get('src'):
try:
self._handle_template()
except ValueError as exc:
return dict(failed=True, msg=exc.message)
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
if self._task.args.get('backup') and result.get('__backup__'):
# User requested backup and no error occurred in module.
# NOTE: If there is a parameter error, _backup key may not be in results.
filepath = self._write_backup(task_vars['inventory_hostname'],
result['__backup__'])
result['backup_path'] = filepath
# strip out any keys that have two leading and two trailing
# underscore characters
for key in list(result):
if PRIVATE_KEYS_RE.match(key):
del result[key]
return result
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _write_backup(self, host, contents):
backup_path = self._get_working_path() + '/backup'
if not os.path.exists(backup_path):
os.mkdir(backup_path)
for fn in glob.glob('%s/%s*' % (backup_path, host)):
os.remove(fn)
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
with open(filename, 'wb') as f:
f.write(to_bytes(to_text(contents, encoding='latin-1'), encoding='utf-8'))
return filename
def _handle_template(self):
src = self._task.args.get('src')
working_path = self._get_working_path()
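        # Absolute paths and URLs are used as-is; anything else is resolved
        # relative to the role or playbook 'templates' directory first.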
        if os.path.isabs(src) or urlsplit(src).scheme:
source = src
else:
source = self._loader.path_dwim_relative(working_path, 'templates', src)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise ValueError('path specified in src not found')
try:
with open(source, 'r') as f:
template_data = to_text(f.read())
except IOError:
return dict(failed=True, msg='unable to load src file')
# Create a template search path in the following order:
# [working_path, self_role_path, dependent_role_paths, dirname(source)]
searchpath = [working_path]
if self._task._role is not None:
searchpath.append(self._task._role._role_path)
        if hasattr(self._task, "_block"):
dep_chain = self._task._block.get_dep_chain()
if dep_chain is not None:
for role in dep_chain:
searchpath.append(role._role_path)
searchpath.append(os.path.dirname(source))
self._templar.environment.loader.searchpath = searchpath
self._task.args['src'] = self._templar.template(template_data)
|
seann1/portfolio5
|
refs/heads/master
|
.meteor/dev_bundle/python/Lib/ctypes/test/test_prototypes.py
|
36
|
from ctypes import *
from ctypes.test import need_symbol
import unittest
# IMPORTANT INFO:
#
# Consider this call:
# func.restype = c_char_p
# func(c_char_p("123"))
# It returns
# "123"
#
# WHY IS THIS SO?
#
# argument tuple (c_char_p("123"), ) is destroyed after the function
# func is called, but NOT before the result is actually built.
#
# If the arglist would be destroyed BEFORE the result has been built,
# the c_char_p("123") object would already have a zero refcount,
# and the pointer passed to (and returned by) the function would
# probably point to deallocated space.
#
# In this case, there would have to be an additional reference to the argument...
import _ctypes_test
testdll = CDLL(_ctypes_test.__file__)
# Return machine address `a` as a (possibly long) non-negative integer.
# Starting with Python 2.5, id(anything) is always non-negative, and
# the ctypes addressof() inherits that via PyLong_FromVoidPtr().
def positive_address(a):
if a >= 0:
return a
# View the bits in `a` as unsigned instead.
import struct
num_bits = struct.calcsize("P") * 8 # num bits in native machine address
a += 1L << num_bits
assert a >= 0
return a
def c_wbuffer(init):
n = len(init) + 1
return (c_wchar * n)(*init)
class CharPointersTestCase(unittest.TestCase):
def setUp(self):
func = testdll._testfunc_p_p
func.restype = c_long
func.argtypes = None
def test_paramflags(self):
# function returns c_void_p result,
# and has a required parameter named 'input'
prototype = CFUNCTYPE(c_void_p, c_void_p)
func = prototype(("_testfunc_p_p", testdll),
((1, "input"),))
try:
func()
except TypeError, details:
self.assertEqual(str(details), "required argument 'input' missing")
else:
self.fail("TypeError not raised")
self.assertEqual(func(None), None)
self.assertEqual(func(input=None), None)
def test_int_pointer_arg(self):
func = testdll._testfunc_p_p
func.restype = c_long
self.assertEqual(0, func(0))
ci = c_int(0)
func.argtypes = POINTER(c_int),
self.assertEqual(positive_address(addressof(ci)),
positive_address(func(byref(ci))))
func.argtypes = c_char_p,
self.assertRaises(ArgumentError, func, byref(ci))
func.argtypes = POINTER(c_short),
self.assertRaises(ArgumentError, func, byref(ci))
func.argtypes = POINTER(c_double),
self.assertRaises(ArgumentError, func, byref(ci))
def test_POINTER_c_char_arg(self):
func = testdll._testfunc_p_p
func.restype = c_char_p
func.argtypes = POINTER(c_char),
self.assertEqual(None, func(None))
self.assertEqual("123", func("123"))
self.assertEqual(None, func(c_char_p(None)))
self.assertEqual("123", func(c_char_p("123")))
self.assertEqual("123", func(c_buffer("123")))
ca = c_char("a")
self.assertEqual("a", func(pointer(ca))[0])
self.assertEqual("a", func(byref(ca))[0])
def test_c_char_p_arg(self):
func = testdll._testfunc_p_p
func.restype = c_char_p
func.argtypes = c_char_p,
self.assertEqual(None, func(None))
self.assertEqual("123", func("123"))
self.assertEqual(None, func(c_char_p(None)))
self.assertEqual("123", func(c_char_p("123")))
self.assertEqual("123", func(c_buffer("123")))
ca = c_char("a")
self.assertEqual("a", func(pointer(ca))[0])
self.assertEqual("a", func(byref(ca))[0])
def test_c_void_p_arg(self):
func = testdll._testfunc_p_p
func.restype = c_char_p
func.argtypes = c_void_p,
self.assertEqual(None, func(None))
self.assertEqual("123", func("123"))
self.assertEqual("123", func(c_char_p("123")))
self.assertEqual(None, func(c_char_p(None)))
self.assertEqual("123", func(c_buffer("123")))
ca = c_char("a")
self.assertEqual("a", func(pointer(ca))[0])
self.assertEqual("a", func(byref(ca))[0])
func(byref(c_int()))
func(pointer(c_int()))
func((c_int * 3)())
@need_symbol('c_wchar_p')
def test_c_void_p_arg_with_c_wchar_p(self):
func = testdll._testfunc_p_p
func.restype = c_wchar_p
func.argtypes = c_void_p,
self.assertEqual(None, func(c_wchar_p(None)))
self.assertEqual(u"123", func(c_wchar_p(u"123")))
def test_instance(self):
func = testdll._testfunc_p_p
func.restype = c_void_p
class X:
_as_parameter_ = None
func.argtypes = c_void_p,
self.assertEqual(None, func(X()))
func.argtypes = None
self.assertEqual(None, func(X()))
@need_symbol('c_wchar')
class WCharPointersTestCase(unittest.TestCase):
def setUp(self):
func = testdll._testfunc_p_p
func.restype = c_int
func.argtypes = None
def test_POINTER_c_wchar_arg(self):
func = testdll._testfunc_p_p
func.restype = c_wchar_p
func.argtypes = POINTER(c_wchar),
self.assertEqual(None, func(None))
self.assertEqual(u"123", func(u"123"))
self.assertEqual(None, func(c_wchar_p(None)))
self.assertEqual(u"123", func(c_wchar_p(u"123")))
self.assertEqual(u"123", func(c_wbuffer(u"123")))
ca = c_wchar("a")
self.assertEqual(u"a", func(pointer(ca))[0])
self.assertEqual(u"a", func(byref(ca))[0])
def test_c_wchar_p_arg(self):
func = testdll._testfunc_p_p
func.restype = c_wchar_p
func.argtypes = c_wchar_p,
c_wchar_p.from_param(u"123")
self.assertEqual(None, func(None))
self.assertEqual("123", func(u"123"))
self.assertEqual(None, func(c_wchar_p(None)))
self.assertEqual("123", func(c_wchar_p("123")))
# XXX Currently, these raise TypeErrors, although they shouldn't:
self.assertEqual("123", func(c_wbuffer("123")))
ca = c_wchar("a")
self.assertEqual("a", func(pointer(ca))[0])
self.assertEqual("a", func(byref(ca))[0])
class ArrayTest(unittest.TestCase):
def test(self):
func = testdll._testfunc_ai8
func.restype = POINTER(c_int)
func.argtypes = c_int * 8,
func((c_int * 8)(1, 2, 3, 4, 5, 6, 7, 8))
# This did crash before:
def func(): pass
CFUNCTYPE(None, c_int * 3)(func)
################################################################
if __name__ == '__main__':
unittest.main()
|
WarrenWeckesser/scikits-image
|
refs/heads/master
|
skimage/feature/tests/test_match.py
|
23
|
import numpy as np
from numpy.testing import assert_equal, assert_raises
from skimage import data
from skimage import transform as tf
from skimage.color import rgb2gray
from skimage.feature import (BRIEF, match_descriptors,
corner_peaks, corner_harris)
def test_binary_descriptors_unequal_descriptor_sizes_error():
"""Sizes of descriptors of keypoints to be matched should be equal."""
descs1 = np.array([[True, True, False, True],
[False, True, False, True]])
descs2 = np.array([[True, False, False, True, False],
[False, True, True, True, False]])
assert_raises(ValueError, match_descriptors, descs1, descs2)
def test_binary_descriptors():
descs1 = np.array([[True, True, False, True, True],
[False, True, False, True, True]])
descs2 = np.array([[True, False, False, True, False],
[False, False, True, True, True]])
matches = match_descriptors(descs1, descs2)
assert_equal(matches, [[0, 0], [1, 1]])
def test_binary_descriptors_lena_rotation_crosscheck_false():
"""Verify matched keypoints and their corresponding masks results between
lena image and its rotated version with the expected keypoint pairs with
cross_check disabled."""
img = data.lena()
img = rgb2gray(img)
tform = tf.SimilarityTransform(scale=1, rotation=0.15, translation=(0, 0))
rotated_img = tf.warp(img, tform, clip=False)
extractor = BRIEF(descriptor_size=512)
keypoints1 = corner_peaks(corner_harris(img), min_distance=5)
extractor.extract(img, keypoints1)
descriptors1 = extractor.descriptors
keypoints2 = corner_peaks(corner_harris(rotated_img), min_distance=5)
extractor.extract(rotated_img, keypoints2)
descriptors2 = extractor.descriptors
matches = match_descriptors(descriptors1, descriptors2, cross_check=False)
exp_matches1 = np.array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46])
exp_matches2 = np.array([33, 0, 35, 7, 1, 35, 3, 2, 3, 6, 4, 9,
11, 10, 28, 7, 8, 5, 31, 14, 13, 15, 21, 16,
16, 13, 17, 18, 19, 21, 22, 23, 0, 24, 1, 24,
23, 0, 26, 27, 25, 34, 28, 14, 29, 30, 21])
assert_equal(matches[:, 0], exp_matches1)
assert_equal(matches[:, 1], exp_matches2)
def test_binary_descriptors_lena_rotation_crosscheck_true():
"""Verify matched keypoints and their corresponding masks results between
lena image and its rotated version with the expected keypoint pairs with
cross_check enabled."""
img = data.lena()
img = rgb2gray(img)
tform = tf.SimilarityTransform(scale=1, rotation=0.15, translation=(0, 0))
rotated_img = tf.warp(img, tform, clip=False)
extractor = BRIEF(descriptor_size=512)
keypoints1 = corner_peaks(corner_harris(img), min_distance=5)
extractor.extract(img, keypoints1)
descriptors1 = extractor.descriptors
keypoints2 = corner_peaks(corner_harris(rotated_img), min_distance=5)
extractor.extract(rotated_img, keypoints2)
descriptors2 = extractor.descriptors
matches = match_descriptors(descriptors1, descriptors2, cross_check=True)
exp_matches1 = np.array([ 0, 1, 2, 4, 6, 7, 9, 10, 11, 12, 13, 15,
16, 17, 19, 20, 21, 24, 26, 27, 28, 29, 30, 35,
36, 38, 39, 40, 42, 44, 45])
exp_matches2 = np.array([33, 0, 35, 1, 3, 2, 6, 4, 9, 11, 10, 7,
8, 5, 14, 13, 15, 16, 17, 18, 19, 21, 22, 24,
23, 26, 27, 25, 28, 29, 30])
assert_equal(matches[:, 0], exp_matches1)
assert_equal(matches[:, 1], exp_matches2)
def test_max_distance():
descs1 = np.zeros((10, 128))
descs2 = np.zeros((15, 128))
descs1[0, :] = 1
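    # descs1[0] lies at distance sqrt(128) from every zero descriptor in
    # descs2, so max_distance=0.1 filters it out, leaving 9 of 10 matches.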
matches = match_descriptors(descs1, descs2, metric='euclidean',
max_distance=0.1, cross_check=False)
assert len(matches) == 9
matches = match_descriptors(descs1, descs2, metric='euclidean',
max_distance=np.sqrt(128.1),
cross_check=False)
assert len(matches) == 10
matches = match_descriptors(descs1, descs2, metric='euclidean',
max_distance=0.1,
cross_check=True)
assert_equal(matches, [[1, 0]])
matches = match_descriptors(descs1, descs2, metric='euclidean',
max_distance=np.sqrt(128.1),
cross_check=True)
assert_equal(matches, [[1, 0]])
if __name__ == '__main__':
from numpy import testing
testing.run_module_suite()
|
davidbroadwater/nyc-subway-datascience-project
|
refs/heads/master
|
lesson_3/gradient_descent/gradient_descent.py
|
1
|
import numpy
import pandas
def normalize_features(array):
"""
Normalize the features in our data set.
"""
array_normalized = (array - array.mean())/array.std()
mu = array.mean()
sigma = array.std()
return array_normalized, mu, sigma
def compute_cost(features, values, theta):
"""
Compute the cost function given a set of features / values, and values for our thetas.
"""
m = len(values)
sum_of_square_errors = numpy.square(numpy.dot(features, theta) - values).sum()
cost = sum_of_square_errors / (2*m)
return cost
def gradient_descent(features, values, theta, alpha, num_iterations):
"""
Perform gradient descent given a data set with an arbitrary number of features.
"""
# Write some code here that updates the values of theta a number of times equal to
    # num_iterations. Every time you have computed the cost for a given set of thetas,
# you should append it to cost_history. The function should return both the final
# values of theta and the cost history.
# YOUR CODE GOES HERE
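    # Batch update rule: theta := theta + (alpha / m) * X^T (y - X.theta)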
    # Initialize cost_history
cost_history = [0 for i in range(num_iterations)]
# Recalculate m
m = len(values)
    for i in range(num_iterations):
        cost_history[i] = compute_cost(features, values, theta)
        prediction = numpy.dot(features, theta)
        theta = theta + (alpha / m) * numpy.dot(values - prediction, features)
return theta, pandas.Series(cost_history) # leave this line for the grader
if __name__ == '__main__':
# Read data into a pandas dataframe.
data = pandas.read_csv('baseball_stats_regression.csv')
# Isolate features / values.
features = data[['height', 'weight']]
values = data[['HR']]
m = len(values)
# Normalize features.
features, mu, sigma = normalize_features(features)
# Add a column of ones to features for constant term.
features['ones'] = numpy.ones(m)
features_array = numpy.array(features[['ones', 'height', 'weight']])
values_array = numpy.array(values).flatten()
# Set values for alpha, number of iterations.
alpha = 0.01
num_iterations = 1000
# Initialize theta and perform gradient descent.
theta_gradient_descent = numpy.zeros(3)
theta_gradient_descent, cost_history = gradient_descent(features_array, values_array, theta_gradient_descent,
alpha, num_iterations)
print "Theta =\n{theta}\n\nCost History = \n{history}".format(theta=theta_gradient_descent, history=cost_history)
|
Yrthgze/prueba-sourcetree2
|
refs/heads/master
|
burness/0006/extract_key_word_Shlomi_Babluki.py
|
40
|
# coding=UTF-8
import nltk
from nltk.corpus import brown
import os
# This is a fast and simple noun phrase extractor (based on NLTK)
# Feel free to use it, just keep a link back to this post
# http://thetokenizer.com/2013/05/09/efficient-way-to-extract-the-main-topics-of-a-sentence/
# http://www.sharejs.com/codes/
# Create by Shlomi Babluki
# May, 2013
# This is our fast Part of Speech tagger
#############################################################################
brown_train = brown.tagged_sents(categories='news')
regexp_tagger = nltk.RegexpTagger(
[(r'^-?[0-9]+(.[0-9]+)?$', 'CD'),
(r'(-|:|;)$', ':'),
(r'\'*$', 'MD'),
(r'(The|the|A|a|An|an)$', 'AT'),
(r'.*able$', 'JJ'),
(r'^[A-Z].*$', 'NNP'),
(r'.*ness$', 'NN'),
(r'.*ly$', 'RB'),
(r'.*s$', 'NNS'),
(r'.*ing$', 'VBG'),
(r'.*ed$', 'VBD'),
(r'.*', 'NN')
])
unigram_tagger = nltk.UnigramTagger(brown_train, backoff=regexp_tagger)
bigram_tagger = nltk.BigramTagger(brown_train, backoff=unigram_tagger)
#############################################################################
# This is our semi-CFG; Extend it according to your own needs
#############################################################################
cfg = {}
cfg["NNP+NNP"] = "NNP"
cfg["NN+NN"] = "NNI"
cfg["NNI+NN"] = "NNI"
cfg["JJ+JJ"] = "JJ"
cfg["JJ+NN"] = "NNI"
#############################################################################
class NPExtractor(object):
def __init__(self, sentence):
self.sentence = sentence
    # Split the sentence into single words/tokens
def tokenize_sentence(self, sentence):
tokens = nltk.word_tokenize(sentence)
return tokens
# Normalize brown corpus' tags ("NN", "NN-PL", "NNS" > "NN")
def normalize_tags(self, tagged):
n_tagged = []
for t in tagged:
if t[1] == "NP-TL" or t[1] == "NP":
n_tagged.append((t[0], "NNP"))
continue
if t[1].endswith("-TL"):
n_tagged.append((t[0], t[1][:-3]))
continue
if t[1].endswith("S"):
n_tagged.append((t[0], t[1][:-1]))
continue
n_tagged.append((t[0], t[1]))
return n_tagged
# Extract the main topics from the sentence
def extract(self):
tokens = self.tokenize_sentence(self.sentence)
tags = self.normalize_tags(bigram_tagger.tag(tokens))
merge = True
while merge:
merge = False
for x in range(0, len(tags) - 1):
t1 = tags[x]
t2 = tags[x + 1]
key = "%s+%s" % (t1[1], t2[1])
value = cfg.get(key, '')
if value:
merge = True
tags.pop(x)
tags.pop(x)
match = "%s %s" % (t1[0], t2[0])
pos = value
tags.insert(x, (match, pos))
break
matches = []
for t in tags:
if t[1] == "NNP" or t[1] == "NNI":
# if t[1] == "NNP" or t[1] == "NNI" or t[1] == "NN":
matches.append(t[0])
return matches
# Main method, just run "python np_extractor.py"
def main():
path = '.'
for file in os.listdir(path):
text = []
if file.endswith('.txt'):
with open(file, 'rt') as f:
for line in f:
words = line.split()
text += words
str_text=' '.join(text)
np_extractor = NPExtractor(str_text)
result = np_extractor.extract()
print("This file is about: %s" % ", ".join(result))
if __name__ == '__main__':
main()
|
hertzsprung/AtmosTests
|
refs/heads/master
|
generators/deformationPlane.py
|
1
|
from ninjaopenfoam import GhostMesh, DeformationPlaneBuilder, DeformationPlaneCollated
import os
class DeformationPlane:
def __init__(self, parallel, fast):
fastMesh = GhostMesh('deformationPlane-mesh-fast', os.path.join('src/deformationPlane/mesh-fast'))
meshUniform6 = GhostMesh('deformationPlane-mesh-uniform-6', os.path.join('src/deformationPlane/mesh-uniform-6'))
meshUniform3 = GhostMesh('deformationPlane-mesh-uniform-3', os.path.join('src/deformationPlane/mesh-uniform-3'))
meshUniform2 = GhostMesh('deformationPlane-mesh-uniform-2', os.path.join('src/deformationPlane/mesh-uniform-2'))
meshUniform1_5 = GhostMesh('deformationPlane-mesh-uniform-1.5', os.path.join('src/deformationPlane/mesh-uniform-1.5'))
meshUniform1 = GhostMesh('deformationPlane-mesh-uniform-1', os.path.join('src/deformationPlane/mesh-uniform-1'))
meshUniform0_75 = GhostMesh('deformationPlane-mesh-uniform-0.75', os.path.join('src/deformationPlane/mesh-uniform-0.75'))
meshUniform0_5 = GhostMesh('deformationPlane-mesh-uniform-0.5', os.path.join('src/deformationPlane/mesh-uniform-0.5'))
meshUniform0_375 = GhostMesh('deformationPlane-mesh-uniform-0.375', os.path.join('src/deformationPlane/mesh-uniform-0.375'))
meshDistorted6 = GhostMesh('deformationPlane-mesh-distorted-6', os.path.join('src/deformationPlane/mesh-distorted-6'))
meshDistorted3 = GhostMesh('deformationPlane-mesh-distorted-3', os.path.join('src/deformationPlane/mesh-distorted-3'))
meshDistorted2 = GhostMesh('deformationPlane-mesh-distorted-2', os.path.join('src/deformationPlane/mesh-distorted-2'))
meshDistorted1_5 = GhostMesh('deformationPlane-mesh-distorted-1.5', os.path.join('src/deformationPlane/mesh-distorted-1.5'))
meshDistorted1 = GhostMesh('deformationPlane-mesh-distorted-1', os.path.join('src/deformationPlane/mesh-distorted-1'))
meshDistorted0_75 = GhostMesh('deformationPlane-mesh-distorted-0.75', os.path.join('src/deformationPlane/mesh-distorted-0.75'))
meshDistorted0_5 = GhostMesh('deformationPlane-mesh-distorted-0.5', os.path.join('src/deformationPlane/mesh-distorted-0.5'))
meshDistorted0_375 = GhostMesh('deformationPlane-mesh-distorted-0.375', os.path.join('src/deformationPlane/mesh-distorted-0.375'))
self.meshes = [
fastMesh,
meshUniform6, meshUniform3, meshUniform2, meshUniform1_5,
meshUniform1, meshUniform0_75, meshUniform0_5, meshUniform0_375,
meshDistorted6, meshDistorted3, meshDistorted2, meshDistorted1_5,
meshDistorted1, meshDistorted0_75, meshDistorted0_5, meshDistorted0_375
]
deformationPlane = DeformationPlaneBuilder(
parallel=parallel,
fast=fast,
fastMesh=fastMesh)
self.uniformCubicFitCollated = deformationPlane.collated(
'deformationPlane-uniform-cubicFit-collated',
fvSchemes=os.path.join('src/deformationPlane/cubicFit'),
tests=[
DeformationPlaneCollated.Test('deformationPlane-uniform-6-cubicFit', 6, meshUniform6, timestep=0.01),
DeformationPlaneCollated.Test('deformationPlane-uniform-3-cubicFit', 3, meshUniform3, timestep=0.005),
DeformationPlaneCollated.Test('deformationPlane-uniform-2-cubicFit', 2, meshUniform2, timestep=0.004),
DeformationPlaneCollated.Test('deformationPlane-uniform-1.5-cubicFit', 1.5, meshUniform1_5, timestep=0.0025),
DeformationPlaneCollated.Test('deformationPlane-uniform-1-cubicFit', 1, meshUniform1, timestep=0.002),
DeformationPlaneCollated.Test('deformationPlane-uniform-0.75-cubicFit', 0.75, meshUniform0_75, timestep=0.00125),
DeformationPlaneCollated.Test('deformationPlane-uniform-0.5-cubicFit', 0.5, meshUniform0_5, timestep=0.001),
DeformationPlaneCollated.Test('deformationPlane-uniform-0.375-cubicFit', 0.375, meshUniform0_375, timestep=0.000625)
])
self.uniformHighOrderFitCollated = deformationPlane.collated(
'deformationPlane-uniform-highOrderFit-collated',
fvSchemes=os.path.join('src/deformationPlane/highOrderFit'),
tests=[
DeformationPlaneCollated.Test('deformationPlane-uniform-6-highOrderFit', 6, meshUniform6, timestep=0.01),
DeformationPlaneCollated.Test('deformationPlane-uniform-3-highOrderFit', 3, meshUniform3, timestep=0.005),
DeformationPlaneCollated.Test('deformationPlane-uniform-2-highOrderFit', 2, meshUniform2, timestep=0.004),
DeformationPlaneCollated.Test('deformationPlane-uniform-1.5-highOrderFit', 1.5, meshUniform1_5, timestep=0.0025),
DeformationPlaneCollated.Test('deformationPlane-uniform-1-highOrderFit', 1, meshUniform1, timestep=0.002),
DeformationPlaneCollated.Test('deformationPlane-uniform-0.75-highOrderFit', 0.75, meshUniform0_75, timestep=0.00125),
],
controlDict=os.path.join('src/deformationPlane/controlDict.highOrderFit.template'),
solverRule='scalarDeformationHighOrderFit')
self.distortedCubicFitCollated = deformationPlane.collated(
'deformationPlane-distorted-cubicFit-collated',
fvSchemes=os.path.join('src/deformationPlane/cubicFit'),
tests=[
DeformationPlaneCollated.Test('deformationPlane-distorted-6-cubicFit', 6, meshDistorted6, timestep=0.01),
DeformationPlaneCollated.Test('deformationPlane-distorted-3-cubicFit', 3, meshDistorted3, timestep=0.005),
DeformationPlaneCollated.Test('deformationPlane-distorted-2-cubicFit', 2, meshDistorted2, timestep=0.004),
DeformationPlaneCollated.Test('deformationPlane-distorted-1.5-cubicFit', 1.5, meshDistorted1_5, timestep=0.0025),
DeformationPlaneCollated.Test('deformationPlane-distorted-1-cubicFit', 1, meshDistorted1, timestep=0.002),
DeformationPlaneCollated.Test('deformationPlane-distorted-0.75-cubicFit', 0.75, meshDistorted0_75, timestep=0.00125),
DeformationPlaneCollated.Test('deformationPlane-distorted-0.5-cubicFit', 0.5, meshDistorted0_5, timestep=0.001),
DeformationPlaneCollated.Test('deformationPlane-distorted-0.375-cubicFit', 0.375, meshDistorted0_375, timestep=0.000625)
])
self.distortedHighOrderFitCollated = deformationPlane.collated(
'deformationPlane-distorted-highOrderFit-collated',
fvSchemes=os.path.join('src/deformationPlane/highOrderFit'),
tests=[
DeformationPlaneCollated.Test('deformationPlane-distorted-6-highOrderFit', 6, meshDistorted6, timestep=0.01),
DeformationPlaneCollated.Test('deformationPlane-distorted-3-highOrderFit', 3, meshDistorted3, timestep=0.005),
DeformationPlaneCollated.Test('deformationPlane-distorted-2-highOrderFit', 2, meshDistorted2, timestep=0.004),
DeformationPlaneCollated.Test('deformationPlane-distorted-1.5-highOrderFit', 1.5, meshDistorted1_5, timestep=0.0025),
DeformationPlaneCollated.Test('deformationPlane-distorted-1-highOrderFit', 1, meshDistorted1, timestep=0.002),
DeformationPlaneCollated.Test('deformationPlane-distorted-0.75-highOrderFit', 0.75, meshDistorted0_75, timestep=0.00125),
],
controlDict=os.path.join('src/deformationPlane/controlDict.highOrderFit.template'),
solverRule='scalarDeformationHighOrderFit')
def addTo(self, build):
build.addAll(self.meshes)
build.add(self.uniformCubicFitCollated)
build.addAll(self.uniformCubicFitCollated.tests)
build.add(self.uniformHighOrderFitCollated)
build.addAll(self.uniformHighOrderFitCollated.tests)
build.add(self.distortedCubicFitCollated)
build.addAll(self.distortedCubicFitCollated.tests)
build.add(self.distortedHighOrderFitCollated)
build.addAll(self.distortedHighOrderFitCollated.tests)
|
louissobel/828-qemu-fork
|
refs/heads/tpm
|
scripts/qapi-types.py
|
54
|
#
# QAPI types generator
#
# Copyright IBM, Corp. 2011
#
# Authors:
# Anthony Liguori <aliguori@us.ibm.com>
#
# This work is licensed under the terms of the GNU GPLv2.
# See the COPYING.LIB file in the top-level directory.
from ordereddict import OrderedDict
from qapi import *
import sys
import os
import getopt
import errno
def generate_fwd_struct(name, members, builtin_type=False):
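    # The anonymous union pads the value slot to 64 bits so that generated
    # list nodes have the same layout on 32- and 64-bit hosts.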
if builtin_type:
return mcgen('''
typedef struct %(name)sList
{
union {
%(type)s value;
uint64_t padding;
};
struct %(name)sList *next;
} %(name)sList;
''',
type=c_type(name),
name=name)
return mcgen('''
typedef struct %(name)s %(name)s;
typedef struct %(name)sList
{
union {
%(name)s *value;
uint64_t padding;
};
struct %(name)sList *next;
} %(name)sList;
''',
name=name)
def generate_fwd_enum_struct(name, members):
return mcgen('''
typedef struct %(name)sList
{
union {
%(name)s value;
uint64_t padding;
};
struct %(name)sList *next;
} %(name)sList;
''',
name=name)
def generate_struct_fields(members):
ret = ''
for argname, argentry, optional, structured in parse_args(members):
if optional:
ret += mcgen('''
bool has_%(c_name)s;
''',
c_name=c_var(argname))
if structured:
push_indent()
ret += generate_struct({ "field": argname, "data": argentry})
pop_indent()
else:
ret += mcgen('''
%(c_type)s %(c_name)s;
''',
c_type=c_type(argentry), c_name=c_var(argname))
return ret
def generate_struct(expr):
structname = expr.get('type', "")
fieldname = expr.get('field', "")
members = expr['data']
base = expr.get('base')
ret = mcgen('''
struct %(name)s
{
''',
name=structname)
if base:
ret += generate_struct_fields({'base': base})
ret += generate_struct_fields(members)
if len(fieldname):
fieldname = " " + fieldname
ret += mcgen('''
}%(field)s;
''',
field=fieldname)
return ret
def generate_enum_lookup(name, values):
ret = mcgen('''
const char *%(name)s_lookup[] = {
''',
name=name)
for value in values:
ret += mcgen('''
"%(value)s",
''',
value=value)
ret += mcgen('''
NULL,
};
''')
return ret
def generate_enum_name(name):
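    # e.g. 'RunState' -> 'RUN_STATE'; fully upper-case names pass through.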
if name.isupper():
return c_fun(name, False)
new_name = ''
for c in c_fun(name, False):
if c.isupper():
new_name += '_'
new_name += c
return new_name.lstrip('_').upper()
def generate_enum(name, values):
lookup_decl = mcgen('''
extern const char *%(name)s_lookup[];
''',
name=name)
enum_decl = mcgen('''
typedef enum %(name)s
{
''',
name=name)
# append automatically generated _MAX value
enum_values = values + [ 'MAX' ]
i = 0
for value in enum_values:
enum_decl += mcgen('''
%(abbrev)s_%(value)s = %(i)d,
''',
abbrev=de_camel_case(name).upper(),
value=generate_enum_name(value),
i=i)
i += 1
enum_decl += mcgen('''
} %(name)s;
''',
name=name)
return lookup_decl + enum_decl
def generate_anon_union_qtypes(expr):
name = expr['union']
members = expr['data']
ret = mcgen('''
const int %(name)s_qtypes[QTYPE_MAX] = {
''',
name=name)
for key in members:
qapi_type = members[key]
if builtin_type_qtypes.has_key(qapi_type):
qtype = builtin_type_qtypes[qapi_type]
elif find_struct(qapi_type):
qtype = "QTYPE_QDICT"
elif find_union(qapi_type):
qtype = "QTYPE_QDICT"
else:
assert False, "Invalid anonymous union member"
ret += mcgen('''
[ %(qtype)s ] = %(abbrev)s_KIND_%(enum)s,
''',
qtype = qtype,
abbrev = de_camel_case(name).upper(),
enum = c_fun(de_camel_case(key),False).upper())
ret += mcgen('''
};
''')
return ret
def generate_union(expr):
name = expr['union']
typeinfo = expr['data']
base = expr.get('base')
discriminator = expr.get('discriminator')
ret = mcgen('''
struct %(name)s
{
%(name)sKind kind;
union {
void *data;
''',
name=name)
for key in typeinfo:
ret += mcgen('''
%(c_type)s %(c_name)s;
''',
c_type=c_type(typeinfo[key]),
c_name=c_fun(key))
ret += mcgen('''
};
''')
if base:
base_fields = find_struct(base)['data']
if discriminator:
base_fields = base_fields.copy()
del base_fields[discriminator]
ret += generate_struct_fields(base_fields)
else:
assert not discriminator
ret += mcgen('''
};
''')
if discriminator == {}:
ret += mcgen('''
extern const int %(name)s_qtypes[];
''',
name=name)
return ret
def generate_type_cleanup_decl(name):
ret = mcgen('''
void qapi_free_%(type)s(%(c_type)s obj);
''',
c_type=c_type(name),type=name)
return ret
def generate_type_cleanup(name):
ret = mcgen('''
void qapi_free_%(type)s(%(c_type)s obj)
{
QapiDeallocVisitor *md;
Visitor *v;
if (!obj) {
return;
}
md = qapi_dealloc_visitor_new();
v = qapi_dealloc_get_visitor(md);
visit_type_%(type)s(v, &obj, NULL, NULL);
qapi_dealloc_visitor_cleanup(md);
}
''',
c_type=c_type(name),type=name)
return ret
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "chbp:o:",
["source", "header", "builtins",
"prefix=", "output-dir="])
except getopt.GetoptError, err:
print str(err)
sys.exit(1)
output_dir = ""
prefix = ""
c_file = 'qapi-types.c'
h_file = 'qapi-types.h'
do_c = False
do_h = False
do_builtins = False
for o, a in opts:
if o in ("-p", "--prefix"):
prefix = a
elif o in ("-o", "--output-dir"):
output_dir = a + "/"
elif o in ("-c", "--source"):
do_c = True
elif o in ("-h", "--header"):
do_h = True
elif o in ("-b", "--builtins"):
do_builtins = True
if not do_c and not do_h:
do_c = True
do_h = True
c_file = output_dir + prefix + c_file
h_file = output_dir + prefix + h_file
try:
os.makedirs(output_dir)
except os.error, e:
if e.errno != errno.EEXIST:
raise
def maybe_open(really, name, opt):
if really:
return open(name, opt)
else:
import StringIO
return StringIO.StringIO()
fdef = maybe_open(do_c, c_file, 'w')
fdecl = maybe_open(do_h, h_file, 'w')
fdef.write(mcgen('''
/* AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* deallocation functions for schema-defined QAPI types
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
* Michael Roth <mdroth@linux.vnet.ibm.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#include "qapi/dealloc-visitor.h"
#include "%(prefix)sqapi-types.h"
#include "%(prefix)sqapi-visit.h"
''', prefix=prefix))
fdecl.write(mcgen('''
/* AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* schema-defined QAPI types
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#ifndef %(guard)s
#define %(guard)s
#include <stdbool.h>
#include <stdint.h>
''',
guard=guardname(h_file)))
exprs = parse_schema(sys.stdin)
exprs = filter(lambda expr: not expr.has_key('gen'), exprs)
fdecl.write(guardstart("QAPI_TYPES_BUILTIN_STRUCT_DECL"))
for typename in builtin_types:
fdecl.write(generate_fwd_struct(typename, None, builtin_type=True))
fdecl.write(guardend("QAPI_TYPES_BUILTIN_STRUCT_DECL"))
for expr in exprs:
ret = "\n"
if expr.has_key('type'):
ret += generate_fwd_struct(expr['type'], expr['data'])
elif expr.has_key('enum'):
ret += generate_enum(expr['enum'], expr['data']) + "\n"
ret += generate_fwd_enum_struct(expr['enum'], expr['data'])
fdef.write(generate_enum_lookup(expr['enum'], expr['data']))
elif expr.has_key('union'):
ret += generate_fwd_struct(expr['union'], expr['data']) + "\n"
ret += generate_enum('%sKind' % expr['union'], expr['data'].keys())
fdef.write(generate_enum_lookup('%sKind' % expr['union'], expr['data'].keys()))
if expr.get('discriminator') == {}:
fdef.write(generate_anon_union_qtypes(expr))
else:
continue
fdecl.write(ret)
# to avoid header dependency hell, we always generate declarations
# for built-in types in our header files and simply guard them
fdecl.write(guardstart("QAPI_TYPES_BUILTIN_CLEANUP_DECL"))
for typename in builtin_types:
fdecl.write(generate_type_cleanup_decl(typename + "List"))
fdecl.write(guardend("QAPI_TYPES_BUILTIN_CLEANUP_DECL"))
# ...this doesn't work for cases where we link in multiple objects that
# have the functions defined, so we use -b option to provide control
# over these cases
if do_builtins:
fdef.write(guardstart("QAPI_TYPES_BUILTIN_CLEANUP_DEF"))
for typename in builtin_types:
fdef.write(generate_type_cleanup(typename + "List"))
fdef.write(guardend("QAPI_TYPES_BUILTIN_CLEANUP_DEF"))

for expr in exprs:
    ret = "\n"
    if expr.has_key('type'):
        ret += generate_struct(expr) + "\n"
        ret += generate_type_cleanup_decl(expr['type'] + "List")
        fdef.write(generate_type_cleanup(expr['type'] + "List") + "\n")
        ret += generate_type_cleanup_decl(expr['type'])
        fdef.write(generate_type_cleanup(expr['type']) + "\n")
    elif expr.has_key('union'):
        ret += generate_union(expr)
        ret += generate_type_cleanup_decl(expr['union'] + "List")
        fdef.write(generate_type_cleanup(expr['union'] + "List") + "\n")
        ret += generate_type_cleanup_decl(expr['union'])
        fdef.write(generate_type_cleanup(expr['union']) + "\n")
    elif expr.has_key('enum'):
        ret += generate_type_cleanup_decl(expr['enum'] + "List")
        fdef.write(generate_type_cleanup(expr['enum'] + "List") + "\n")
    else:
        continue
    fdecl.write(ret)

fdecl.write('''
#endif
''')

fdecl.flush()
fdecl.close()

fdef.flush()
fdef.close()
|
miselin/grpc
|
refs/heads/master
|
src/python/grpcio/grpc/framework/foundation/future.py
|
4
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A Future interface.
Python doesn't have a Future interface in its standard library. In the absence
of such a standard, three separate, incompatible implementations
(concurrent.futures.Future, ndb.Future, and asyncio.Future) have appeared. This
interface attempts to be as compatible as possible with
concurrent.futures.Future. From ndb.Future it adopts a traceback-object accessor
method.
Unlike the concrete and implemented Future classes listed above, the Future
class defined in this module is an entirely abstract interface that anyone may
implement and use.
The one known incompatibility between this interface and the interface of
concurrent.futures.Future is that this interface defines its own CancelledError
and TimeoutError exceptions rather than raising the implementation-private
concurrent.futures._base.CancelledError and the
built-in-but-only-in-3.3-and-later TimeoutError.
"""
import abc
import six
class TimeoutError(Exception):
"""Indicates that a particular call timed out."""
class CancelledError(Exception):
"""Indicates that the computation underlying a Future was cancelled."""
class Future(six.with_metaclass(abc.ABCMeta)):
"""A representation of a computation in another control flow.
Computations represented by a Future may be yet to be begun, may be ongoing,
or may have already completed.
"""
# NOTE(nathaniel): This isn't the return type that I would want to have if it
# were up to me. Were this interface being written from scratch, the return
# type of this method would probably be a sum type like:
#
# NOT_COMMENCED
# COMMENCED_AND_NOT_COMPLETED
# PARTIAL_RESULT<Partial_Result_Type>
# COMPLETED<Result_Type>
# UNCANCELLABLE
# NOT_IMMEDIATELY_DETERMINABLE
@abc.abstractmethod
def cancel(self):
"""Attempts to cancel the computation.
This method does not block.
Returns:
True if the computation has not yet begun, will not be allowed to take
place, and determination of both was possible without blocking. False
under all other circumstances including but not limited to the
computation's already having begun, the computation's already having
finished, and the computation's having been scheduled for execution on a
remote system for which a determination of whether or not it commenced
before being cancelled cannot be made without blocking.
"""
raise NotImplementedError()
# NOTE(nathaniel): Here too this isn't the return type that I'd want this
# method to have if it were up to me. I think I'd go with another sum type
# like:
#
# NOT_CANCELLED (this object's cancel method hasn't been called)
# NOT_COMMENCED
# COMMENCED_AND_NOT_COMPLETED
# PARTIAL_RESULT<Partial_Result_Type>
# COMPLETED<Result_Type>
# UNCANCELLABLE
# NOT_IMMEDIATELY_DETERMINABLE
#
# Notice how giving the cancel method the right semantics obviates most
# reasons for this method to exist.
@abc.abstractmethod
def cancelled(self):
"""Describes whether the computation was cancelled.
This method does not block.
Returns:
True if the computation was cancelled any time before its result became
immediately available. False under all other circumstances including but
not limited to this object's cancel method not having been called and
the computation's result having become immediately available.
"""
raise NotImplementedError()
@abc.abstractmethod
def running(self):
"""Describes whether the computation is taking place.
This method does not block.
Returns:
True if the computation is scheduled to take place in the future or is
taking place now, or False if the computation took place in the past or
was cancelled.
"""
raise NotImplementedError()
# NOTE(nathaniel): These aren't quite the semantics I'd like here either. I
# would rather this only returned True in cases in which the underlying
# computation completed successfully. A computation's having been cancelled
# conflicts with considering that computation "done".
@abc.abstractmethod
def done(self):
"""Describes whether the computation has taken place.
This method does not block.
Returns:
True if the computation is known to have either completed or have been
unscheduled or interrupted. False if the computation may possibly be
executing or scheduled to execute later.
"""
raise NotImplementedError()
@abc.abstractmethod
def result(self, timeout=None):
"""Accesses the outcome of the computation or raises its exception.
This method may return immediately or may block.
Args:
timeout: The length of time in seconds to wait for the computation to
finish or be cancelled, or None if this method should block until the
computation has finished or is cancelled no matter how long that takes.
Returns:
The return value of the computation.
Raises:
TimeoutError: If a timeout value is passed and the computation does not
terminate within the allotted time.
CancelledError: If the computation was cancelled.
Exception: If the computation raised an exception, this call will raise
the same exception.
"""
raise NotImplementedError()
@abc.abstractmethod
def exception(self, timeout=None):
"""Return the exception raised by the computation.
This method may return immediately or may block.
Args:
timeout: The length of time in seconds to wait for the computation to
terminate or be cancelled, or None if this method should block until
the computation is terminated or is cancelled no matter how long that
takes.
Returns:
The exception raised by the computation, or None if the computation did
not raise an exception.
Raises:
TimeoutError: If a timeout value is passed and the computation does not
terminate within the allotted time.
CancelledError: If the computation was cancelled.
"""
raise NotImplementedError()
@abc.abstractmethod
def traceback(self, timeout=None):
"""Access the traceback of the exception raised by the computation.
This method may return immediately or may block.
Args:
timeout: The length of time in seconds to wait for the computation to
terminate or be cancelled, or None if this method should block until
the computation is terminated or is cancelled no matter how long that
takes.
Returns:
The traceback of the exception raised by the computation, or None if the
computation did not raise an exception.
Raises:
TimeoutError: If a timeout value is passed and the computation does not
terminate within the allotted time.
CancelledError: If the computation was cancelled.
"""
raise NotImplementedError()
@abc.abstractmethod
def add_done_callback(self, fn):
"""Adds a function to be called at completion of the computation.
The callback will be passed this Future object describing the outcome of
the computation.
If the computation has already completed, the callback will be called
immediately.
Args:
fn: A callable taking a this Future object as its single parameter.
"""
raise NotImplementedError()
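

# A minimal illustrative sketch, not part of the original module: one way a
# concrete implementation of this interface might look for a computation that
# has already completed successfully. The class name and behavior here are
# hypothetical, chosen only to demonstrate the contract described above.
class _CompletedFuture(Future):
  """A Future that is already resolved with a precomputed value."""

  def __init__(self, value):
    self._value = value

  def cancel(self):
    # The computation has already finished, so cancellation cannot succeed.
    return False

  def cancelled(self):
    return False

  def running(self):
    return False

  def done(self):
    return True

  def result(self, timeout=None):
    # The value is immediately available, so the timeout is irrelevant.
    return self._value

  def exception(self, timeout=None):
    return None

  def traceback(self, timeout=None):
    return None

  def add_done_callback(self, fn):
    # The computation is already complete, so call back immediately.
    fn(self)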
|
Deepakkothandan/ansible-modules-extras
|
refs/heads/devel
|
database/mssql/__init__.py
|
12133432
| |
PetrDlouhy/django
|
refs/heads/master
|
tests/view_tests/urls.py
|
12
|
# -*- coding: utf-8 -*-
from os import path

from django.conf.urls import include, url
from django.conf.urls.i18n import i18n_patterns
from django.utils._os import upath
from django.utils.translation import ugettext_lazy as _
from django.views import defaults, i18n, static

from . import views

base_dir = path.dirname(path.abspath(upath(__file__)))
media_dir = path.join(base_dir, 'media')
locale_dir = path.join(base_dir, 'locale')
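
# Each dictionary below is passed as the extra keyword-arguments dict of a
# javascript_catalog URL pattern further down in this module.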
js_info_dict = {
    'domain': 'djangojs',
    'packages': ('view_tests',),
}

js_info_dict_english_translation = {
    'domain': 'djangojs',
    'packages': ('view_tests.app0',),
}

js_info_dict_multi_packages1 = {
    'domain': 'djangojs',
    'packages': ('view_tests.app1', 'view_tests.app2'),
}

js_info_dict_multi_packages2 = {
    'domain': 'djangojs',
    'packages': ('view_tests.app3', 'view_tests.app4'),
}

js_info_dict_admin = {
    'domain': 'djangojs',
    'packages': ('django.contrib.admin', 'view_tests'),
}

js_info_dict_app1 = {
    'domain': 'djangojs',
    'packages': ('view_tests.app1',),
}

js_info_dict_app2 = {
    'domain': 'djangojs',
    'packages': ('view_tests.app2',),
}

js_info_dict_app5 = {
    'domain': 'djangojs',
    'packages': ('view_tests.app5',),
}

urlpatterns = [
    url(r'^$', views.index_page),

    # Default views
    url(r'^non_existing_url/', defaults.page_not_found),
    url(r'^server_error/', defaults.server_error),

    # a view that raises an exception for the debug view
    url(r'raises/$', views.raises),
    url(r'raises400/$', views.raises400),
    url(r'raises403/$', views.raises403),
    url(r'raises404/$', views.raises404),
    url(r'raises500/$', views.raises500),

    url(r'technical404/$', views.technical404, name="my404"),
    url(r'classbased404/$', views.Http404View.as_view()),

    # i18n views
    url(r'^i18n/', include('django.conf.urls.i18n')),
    url(r'^jsi18n/$', i18n.javascript_catalog, js_info_dict),
    url(r'^jsi18n/app1/$', i18n.javascript_catalog, js_info_dict_app1),
    url(r'^jsi18n/app2/$', i18n.javascript_catalog, js_info_dict_app2),
    url(r'^jsi18n/app5/$', i18n.javascript_catalog, js_info_dict_app5),
    url(r'^jsi18n_english_translation/$', i18n.javascript_catalog, js_info_dict_english_translation),
    url(r'^jsi18n_multi_packages1/$', i18n.javascript_catalog, js_info_dict_multi_packages1),
    url(r'^jsi18n_multi_packages2/$', i18n.javascript_catalog, js_info_dict_multi_packages2),
    url(r'^jsi18n_admin/$', i18n.javascript_catalog, js_info_dict_admin),
    url(r'^jsi18n_template/$', views.jsi18n),
    url(r'^jsi18n_multi_catalogs/$', views.jsi18n_multi_catalogs),

    # Static views
    url(r'^site_media/(?P<path>.*)$', static.serve, {'document_root': media_dir}),
]

urlpatterns += i18n_patterns(
    url(_(r'^translated/$'), views.index_page, name='i18n_prefixed'),
)

urlpatterns += [
    url(r'view_exception/(?P<n>[0-9]+)/$', views.view_exception, name='view_exception'),
    url(r'template_exception/(?P<n>[0-9]+)/$', views.template_exception, name='template_exception'),
    url(r'^raises_template_does_not_exist/(?P<path>.+)$', views.raises_template_does_not_exist,
        name='raises_template_does_not_exist'),
    url(r'^render_no_template/$', views.render_no_template, name='render_no_template'),
]
|
WillisXChen/django-oscar
|
refs/heads/master
|
oscar/lib/python2.7/site-packages/IPython/utils/tests/test_localinterfaces.py
|
12
|
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
from .. import localinterfaces


def test_load_ips():
    # Override the machinery that skips it if it was called before
    localinterfaces._load_ips.called = False

    # Just check this doesn't error
    localinterfaces._load_ips(suppress_exceptions=False)
|
kenkeiras/lxc-application-packages
|
refs/heads/master
|
lap/conventions.py
|
1
|
import os
from os.path import expanduser
LOCAL_PATH = os.path.join(expanduser("~"), '.local', 'share', 'lap')
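# Typically resolves to something like '/home/<user>/.local/share/lap'.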
|
liqi328/rjrepaircompany
|
refs/heads/master
|
django/contrib/gis/db/models/sql/aggregates.py
|
309
|
from django.db.models.sql.aggregates import *
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.models.sql.conversion import GeomField


class GeoAggregate(Aggregate):
    # Default SQL template for spatial aggregates.
    sql_template = '%(function)s(%(field)s)'
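    # For example, with sql_function 'ST_Union' and field name '"geom"', this
    # template renders as ST_Union("geom") in as_sql() below.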

    # Conversion class, if necessary.
    conversion_class = None

    # Flags for indicating the type of the aggregate.
    is_extent = False

    def __init__(self, col, source=None, is_summary=False, tolerance=0.05, **extra):
        super(GeoAggregate, self).__init__(col, source, is_summary, **extra)

        # Required by some Oracle aggregates.
        self.tolerance = tolerance

        # Can't use geographic aggregates on non-geometry fields.
        if not isinstance(self.source, GeometryField):
            raise ValueError('Geospatial aggregates only allowed on geometry fields.')

    def as_sql(self, qn, connection):
        "Return the aggregate, rendered as SQL."
        if connection.ops.oracle:
            self.extra['tolerance'] = self.tolerance

        if hasattr(self.col, 'as_sql'):
            field_name = self.col.as_sql(qn, connection)
        elif isinstance(self.col, (list, tuple)):
            field_name = '.'.join([qn(c) for c in self.col])
        else:
            field_name = self.col

        sql_template, sql_function = connection.ops.spatial_aggregate_sql(self)

        params = {
            'function': sql_function,
            'field': field_name
        }
        params.update(self.extra)

        return sql_template % params


class Collect(GeoAggregate):
    pass


class Extent(GeoAggregate):
    is_extent = '2D'


class Extent3D(GeoAggregate):
    is_extent = '3D'


class MakeLine(GeoAggregate):
    pass


class Union(GeoAggregate):
    pass
|