| repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M, nullable ⌀) |
|---|---|---|---|---|
gersolar/stations
|
refs/heads/master
|
stations/tests/units/test_measurements.py
|
1
|
# -*- coding: utf-8 -*-
from stations.models import *
from django.test import TestCase
from datetime import datetime
import pytz
class TestMeasurements(TestCase):
fixtures = [ 'initial_data.yaml', '*']
def setUp(self):
self.finish = datetime.utcnow().replace(tzinfo=pytz.UTC)
self.mean = 60
self.between = 600
self.refresh_presision = 1
self.configuration = Configuration.objects.filter(position__station__name = 'Luján')[0]
def test_register_or_check(self):
# check that there are no measurements yet.
self.assertEquals(Measurement.objects.count(), 0)
# check that the object is registered the first time.
m1 = Measurement.register_or_check(self.finish, self.mean, self.between, self.refresh_presision, self.configuration)
self.assertEquals(Measurement.objects.count(), 1)
# check that saving it again avoids creating a new measurement in the
# database and returns the same object.
m2 = Measurement.register_or_check(self.finish, self.mean, self.between, self.refresh_presision, self.configuration)
self.assertEquals(Measurement.objects.count(), 1)
# check that saving it again with a different mean, between or precision
# raises an exception.
with self.assertRaises(InvalidMeasurementError):
m3 = Measurement.register_or_check(self.finish, self.mean + 1, self.between, self.refresh_presision, self.configuration)
self.assertEquals(Measurement.objects.count(), 1)
def test_serialization(self):
# check that __str__ returns the object's configuration, finish, mean and between attributes.
measurement = Measurement.register_or_check(self.finish, self.mean, self.between, self.refresh_presision, self.configuration)
result = u'%s %s %.2f (%i sec)' % (unicode(measurement.configuration), unicode(measurement.finish), self.mean, self.between)
self.assertEquals(str(measurement), result.encode("utf-8"))
# check that __unicode__ returns the same value as text rather than bytes.
self.assertEquals(unicode(measurement), result)
|
gomiero/PTVS
|
refs/heads/master
|
Python/Tests/TestData/Outlining/Program.py
|
18
|
def f():
print('hello')
print('world')
print('!')
#comment
class C:
print('hello')
print('world')
print('!')
#comment
if True:
print('hello')
print('world')
print('!')
#comment
if True:
print('hello')
print('world')
print('!')
else:
print('hello')
print('world')
print('!')
#comment
if True:
print('hello')
print('world')
print('!')
elif True:
print('hello')
print('world')
print('!')
#comment
for i in xrange(100):
print('hello')
print('world')
print('!')
while True:
print('hello')
print('world')
print('!')
|
ThiagoGarciaAlves/intellij-community
|
refs/heads/master
|
python/testData/mover/multiLineSelection6.py
|
83
|
class Test(object):
def q(self):
c = 3
<caret><selection> a = 1
b = 2
</selection>
|
lizardsystem/lizard-workspace
|
refs/heads/master
|
lizard_workspace/testsettings.py
|
1
|
import os
from lizard_ui.settingshelper import setup_logging
from lizard_ui.settingshelper import STATICFILES_FINDERS
DEBUG = True
TEMPLATE_DEBUG = True
# SETTINGS_DIR allows media paths and so to be relative to this settings file
# instead of hardcoded to c:\only\on\my\computer.
SETTINGS_DIR = os.path.dirname(os.path.realpath(__file__))
# BUILDOUT_DIR is for access to the "surrounding" buildout, for instance for
# BUILDOUT_DIR/var/static files to give django-staticfiles a proper place
# to place all collected static files.
BUILDOUT_DIR = os.path.abspath(os.path.join(SETTINGS_DIR, '..'))
LOGGING = setup_logging(BUILDOUT_DIR)
# ENGINE: 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
# In case of geodatabase, prepend with:
# django.contrib.gis.db.backends.(postgis)
DATABASES = {
# If you want to use another database, consider putting the database
# settings in localsettings.py. Otherwise, if you change the settings in
# the current file and commit them to the repository, other developers will
# also use these settings whether they have that database or not.
# One of those other developers is Jenkins, our continuous integration
# solution. Jenkins can only run the tests of the current application when
# the specified database exists. When the tests cannot run, Jenkins sees
# that as an error.
'default': {
'NAME': os.path.join(BUILDOUT_DIR, 'var', 'sqlite', 'test.db'),
'ENGINE': 'django.db.backends.sqlite3',
'USER': '',
'PASSWORD': '',
'HOST': '', # empty string for localhost.
'PORT': '', # empty string for default.
}
}
SITE_ID = 1
INSTALLED_APPS = [
'lizard_workspace',
'lizard_ui',
'staticfiles',
'compressor',
'south',
'django_nose',
'django_extensions',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.gis',
'django.contrib.sites',
]
ROOT_URLCONF = 'lizard_workspace.urls'
TEMPLATE_CONTEXT_PROCESSORS = (
# Uncomment this one if you use lizard-map.
# 'lizard_map.context_processors.processor.processor',
# Default django 1.3 processors.
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.contrib.messages.context_processors.messages"
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
# Used for django-staticfiles (and for media files
STATIC_URL = '/static_media/'
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(BUILDOUT_DIR, 'var', 'static')
MEDIA_ROOT = os.path.join(BUILDOUT_DIR, 'var', 'media')
STATICFILES_FINDERS = STATICFILES_FINDERS
try:
# Import local settings that aren't stored in svn/git.
from lizard_workspace.local_testsettings import *
except ImportError:
pass
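# A local_testsettings.py (kept out of version control) may simply redefine
# any of the settings above, for example:
#   DEBUG = False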
|
rapidsms/rapidsms-core-dev
|
refs/heads/master
|
lib/rapidsms/utils.py
|
7
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
import pytz
from datetime import datetime
def empty_str(in_str):
"""
Simple helper to return True if the passed
string reference is None or '' or all whitespace
"""
if in_str is not None and not isinstance(in_str, basestring):
raise TypeError('Arg must be None or a string type')
return in_str is None or \
len(in_str.strip())==0
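# e.g. empty_str(None) -> True; empty_str('   ') -> True; empty_str('x') -> False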
def to_naive_utc_dt(dt):
"""
Converts a datetime to a naive datetime (no tzinfo)
as follows:
if inbound dt is already naive, it just returns it
if inbound is timezone aware, converts it to UTC,
then strips the tzinfo
"""
if not isinstance(dt, datetime):
raise TypeError('Arg must be type datetime')
if dt.tzinfo is None:
return dt
return dt.astimezone(pytz.utc).replace(tzinfo=None)
def to_aware_utc_dt(dt):
"""
Convert an inbound datetime into a timezone
aware datetime in UTC as follows:
if inbound is naive, uses 'tzinfo.localize' to
add utc tzinfo. NOTE: time values are not changed;
the tzinfo is simply added to identify this as a
UTC tz-aware object.
if inbound is aware, uses 'datetime.astimezone'
to convert timevalues to UTC and set tzinfo to
utc.
"""
if not isinstance(dt, datetime):
raise TypeError('Arg must be type datetime')
if dt.tzinfo is None:
return pytz.utc.localize(dt)
return dt.astimezone(pytz.utc)
def timedelta_as_minutes(td):
"""
Returns the value of the entire timedelta as
integer minutes, rounded down
"""
return timedelta_as_seconds(td) // 60
def timedelta_as_seconds(td):
'''
Returns the value of the entire timedelta as
integer seconds, rounded down
'''
return td.days*86400+td.seconds
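# Usage sketch for the helpers above (values are illustrative):
#
#   >>> from datetime import datetime, timedelta
#   >>> aware = to_aware_utc_dt(datetime(2013, 1, 1, 12, 0))
#   >>> aware.tzinfo
#   <UTC>
#   >>> to_naive_utc_dt(aware)
#   datetime.datetime(2013, 1, 1, 12, 0)
#   >>> timedelta_as_seconds(timedelta(days=1, seconds=30))
#   86430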
|
bennojoy/ansible
|
refs/heads/devel
|
plugins/inventory/apache-libcloud.py
|
89
|
#!/usr/bin/env python
# (c) 2013, Sebastien Goasguen <runseb@gmail.com>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
'''
Apache Libcloud generic external inventory script
=================================================
Generates inventory that Ansible can understand by making API request to
Cloud providers using the Apache libcloud library.
This script also assumes there is a libcloud.ini file alongside it
'''
import sys
import os
import argparse
import re
from time import time
import ConfigParser
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
import libcloud.security as sec
try:
import json
except ImportError:
import simplejson as json
class LibcloudInventory(object):
def __init__(self):
''' Main execution path '''
# Inventory grouped by instance IDs, tags, security groups, regions,
# and availability zones
self.inventory = {}
# Index of hostname (address) to instance ID
self.index = {}
# Read settings and parse CLI arguments
self.read_settings()
self.parse_cli_args()
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()
# Data to print
if self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of instances for inventory
if len(self.inventory) == 0:
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print data_to_print
def is_cache_valid(self):
''' Determines whether the cache files have expired or are still valid '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
''' Reads the settings from the libcloud.ini file '''
config = ConfigParser.SafeConfigParser()
libcloud_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'libcloud.ini')
libcloud_ini_path = os.environ.get('LIBCLOUD_INI_PATH', libcloud_default_ini_path)
config.read(libcloud_ini_path)
if not config.has_section('driver'):
raise ValueError('libcloud.ini file must contain a [driver] section')
if config.has_option('driver', 'provider'):
self.provider = config.get('driver','provider')
else:
raise ValueError('libcloud.ini does not have a provider defined')
if config.has_option('driver', 'key'):
self.key = config.get('driver','key')
else:
raise ValueError('libcloud.ini does not have a key defined')
if config.has_option('driver', 'secret'):
self.secret = config.get('driver','secret')
else:
raise ValueError('libcloud.ini does not have a secret defined')
# Default the optional driver settings so the Driver(...) call below
# cannot hit an AttributeError when they are absent from libcloud.ini.
self.host = None
self.secure = True
self.path = None
if config.has_option('driver', 'host'):
self.host = config.get('driver', 'host')
if config.has_option('driver', 'secure'):
self.secure = config.get('driver', 'secure')
if config.has_option('driver', 'verify_ssl_cert'):
self.verify_ssl_cert = config.get('driver', 'verify_ssl_cert')
if config.has_option('driver', 'port'):
self.port = config.get('driver', 'port')
if config.has_option('driver', 'path'):
self.path = config.get('driver', 'path')
if config.has_option('driver', 'api_version'):
self.api_version = config.get('driver', 'api_version')
Driver = get_driver(getattr(Provider, self.provider))
self.conn = Driver(key=self.key, secret=self.secret, secure=self.secure,
host=self.host, path=self.path)
# Cache related
cache_path = config.get('cache', 'cache_path')
self.cache_path_cache = cache_path + "/ansible-libcloud.cache"
self.cache_path_index = cache_path + "/ansible-libcloud.index"
self.cache_max_age = config.getint('cache', 'cache_max_age')
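# A libcloud.ini matching the options read above might look like this
# (provider, key and secret values are placeholders):
#
#   [driver]
#   provider = RACKSPACE
#   key = my_username
#   secret = my_api_key
#
#   [cache]
#   cache_path = /tmp
#   cache_max_age = 300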
def parse_cli_args(self):
'''
Command line argument processing
'''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on libcloud supported providers')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to libcloud supported providers (default: False - use cache files)')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
'''
Do API calls to a location, and save data in cache files
'''
self.get_nodes()
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def get_nodes(self):
'''
Gets the list of all nodes
'''
for node in self.conn.list_nodes():
self.add_node(node)
def get_node(self, node_id):
'''
Gets details about a specific node
'''
return [node for node in self.conn.list_nodes() if node.id == node_id][0]
def add_node(self, node):
'''
Adds a node to the inventory and index, as long as it is
addressable
'''
# Only want running instances
if node.state != 0:
return
# Select the best destination address
dest = node.public_ips[0] if node.public_ips else None
if not dest:
# Skip instances we cannot address (e.g. private VPC subnet)
return
# Add to index
self.index[dest] = node.name
# Inventory: Group by instance ID (always a group of 1)
self.inventory[node.name] = [dest]
'''
# Inventory: Group by region
self.push(self.inventory, region, dest)
# Inventory: Group by availability zone
self.push(self.inventory, node.placement, dest)
# Inventory: Group by instance type
self.push(self.inventory, self.to_safe('type_' + node.instance_type), dest)
'''
# Inventory: Group by key pair
if node.extra['keyname']:
self.push(self.inventory, self.to_safe('key_' + node.extra['keyname']), dest)
# Inventory: Group by security group, quick thing to handle single sg
if node.extra['securitygroup']:
self.push(self.inventory, self.to_safe('sg_' + node.extra['securitygroup'][0]), dest)
def get_host_info(self):
'''
Get variables about a specific host
'''
if len(self.index) == 0:
# Need to load index from cache
self.load_index_from_cache()
if self.args.host not in self.index:
# try updating the cache
self.do_api_calls_update_cache()
if self.args.host not in self.index:
# host might not exist anymore
return self.json_format_dict({}, True)
node_id = self.index[self.args.host]
node = self.get_node(node_id)
instance_vars = {}
for key in vars(node):
value = getattr(node, key)
key = self.to_safe('ec2_' + key)
# Handle complex types
if type(value) in [int, bool]:
instance_vars[key] = value
elif type(value) in [str, unicode]:
instance_vars[key] = value.strip()
elif type(value) == type(None):
instance_vars[key] = ''
elif key == 'ec2_region':
instance_vars[key] = value.name
elif key == 'ec2_tags':
for k, v in value.iteritems():
key = self.to_safe('ec2_tag_' + k)
instance_vars[key] = v
elif key == 'ec2_groups':
group_ids = []
group_names = []
for group in value:
group_ids.append(group.id)
group_names.append(group.name)
instance_vars["ec2_security_group_ids"] = ','.join(group_ids)
instance_vars["ec2_security_group_names"] = ','.join(group_names)
else:
pass
# TODO Product codes if someone finds them useful
#print key
#print type(value)
#print value
return self.json_format_dict(instance_vars, True)
def push(self, my_dict, key, element):
'''
Pushes an element onto a list that may not yet have been defined
in the dict
'''
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element]
def get_inventory_from_cache(self):
'''
Reads the inventory from the cache file and returns it as a JSON
object
'''
cache = open(self.cache_path_cache, 'r')
json_inventory = cache.read()
return json_inventory
def load_index_from_cache(self):
'''
Reads the index from the cache file and sets self.index
'''
cache = open(self.cache_path_index, 'r')
json_index = cache.read()
self.index = json.loads(json_index)
def write_to_cache(self, data, filename):
'''
Writes data in JSON format to a file
'''
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def to_safe(self, word):
'''
Converts 'bad' characters in a string to underscores so they can be
used as Ansible groups
'''
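# e.g. to_safe('sg_default/web') -> 'sg_default_web' (illustrative input)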
return re.sub("[^A-Za-z0-9\-]", "_", word)
def json_format_dict(self, data, pretty=False):
'''
Converts a dict to a JSON object and dumps it as a formatted
string
'''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
def main():
LibcloudInventory()
if __name__ == '__main__':
main()
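# Typical invocations (addresses are illustrative):
#   apache-libcloud.py --list                  # full inventory as JSON
#   apache-libcloud.py --host 10.0.0.5         # variables for a single node
#   apache-libcloud.py --refresh-cache --list  # bypass the cache files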
|
alexanderfefelov/nav
|
refs/heads/master
|
python/nav/models/arnold.py
|
1
|
#
# Copyright (C) 2012 (SD -311000) UNINETT AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Model definitions for arnold"""
#pylint: disable=R0903
from nav.models.fields import VarcharField
from nav.models.manage import Interface
from django.db import models
STATUSES = [
('enabled', 'Enabled'),
('disabled', 'Disabled'),
('quarantined', 'Quarantined')
]
DETENTION_TYPE_CHOICES = [('disable', 'Block'),
('quarantine', 'Quarantine')]
KEEP_CLOSED_CHOICES = [('n', 'Open on move'), ('y', 'All closed')]
class Identity(models.Model):
"""
The table contains a listing for each computer/interface combo Arnold
has blocked.
"""
id = models.AutoField(db_column='identityid', primary_key=True)
mac = models.CharField(db_column='mac', max_length=17)
status = VarcharField(db_column='blocked_status', choices=STATUSES)
justification = models.ForeignKey('Justification',
db_column='blocked_reasonid')
interface = models.ForeignKey(Interface, db_column='swportid')
ip = models.IPAddressField(null=True, default='0.0.0.0')
dns = VarcharField(blank=True)
netbios = VarcharField(blank=True)
first_offence = models.DateTimeField(db_column='starttime',
auto_now_add=True)
last_changed = models.DateTimeField(db_column='lastchanged', auto_now=True)
autoenable = models.DateTimeField(null=True)
autoenablestep = models.IntegerField(null=True, default=2)
mail = VarcharField(blank=True)
organization = models.ForeignKey('Organization', db_column='orgid',
null=True)
keep_closed = models.CharField(db_column='determined', default='n',
max_length=1, choices=KEEP_CLOSED_CHOICES)
fromvlan = models.IntegerField(null=True)
tovlan = models.ForeignKey('QuarantineVlan', db_column='tovlan',
to_field='vlan', null=True, default=None)
# If the interface no longer exists in the database, the user still
# needs a hint of which interface was blocked, since the ifname and
# netbox naturally can no longer be looked up from the interfaceid.
# This field solves that by storing a textual representation of the
# interface, which can be displayed if the situation occurs.
# The format is "interface.ifname at interface.netbox.sysname"
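# e.g. "Gi0/1 at sw1.example.org" (illustrative values)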
textual_interface = VarcharField(default='')
def __unicode__(self):
try:
interface = self.interface
except Interface.DoesNotExist:
interface = "N/A"
return "%s/%s %s" % (self.ip, self.mac, interface)
class Meta:
db_table = 'identity'
ordering = ('last_changed', )
verbose_name = 'identity'
verbose_name_plural = 'identities'
unique_together = ('mac', 'interface')
class Event(models.Model):
"""A class representing an action taken"""
id = models.AutoField(db_column='eventid', primary_key=True)
identity = models.ForeignKey('Identity', db_column='identityid')
comment = VarcharField(db_column='event_comment', blank=True)
action = VarcharField(db_column='blocked_status', choices=STATUSES)
justification = models.ForeignKey('Justification',
db_column='blocked_reasonid')
event_time = models.DateTimeField(db_column='eventtime', auto_now_add=True)
autoenablestep = models.IntegerField(null=True)
executor = VarcharField(db_column='username')
def __unicode__(self):
return "%s: %s" % (self.action, self.event_time)
class Meta:
db_table = 'event'
ordering = ('event_time', )
class Justification(models.Model):
"""Represents the justification for an event"""
id = models.AutoField(db_column='blocked_reasonid', primary_key=True)
name = VarcharField()
description = VarcharField(db_column='comment', blank=True)
def __unicode__(self):
return self.name
class Meta:
db_table = 'blocked_reason'
ordering = ('name', )
class QuarantineVlan(models.Model):
"""A quarantine vlan is a vlan where offenders are placed"""
id = models.AutoField(db_column='quarantineid', primary_key=True)
vlan = models.IntegerField(unique=True)
description = VarcharField(blank=True)
def __unicode__(self):
return "%s - %s" % (self.vlan, self.description)
class Meta:
db_table = 'quarantine_vlans'
ordering = ('vlan',)
class DetentionProfile(models.Model):
"""A detention profile is a configuration used by an automatic detention"""
id = models.AutoField(db_column='blockid', primary_key=True)
name = VarcharField(db_column='blocktitle')
description = VarcharField(db_column='blockdesc', blank=True)
mailfile = VarcharField(blank=True)
justification = models.ForeignKey('Justification', db_column='reasonid')
keep_closed = models.CharField(db_column='determined', default='n',
max_length=1, choices=KEEP_CLOSED_CHOICES)
incremental = models.CharField(default='n', max_length=1)
duration = models.IntegerField(db_column='blocktime')
active = models.CharField(default='n', max_length=1)
last_edited = models.DateTimeField(db_column='lastedited',
auto_now_add=True)
edited_by = VarcharField(db_column='lastedituser')
inputfile = VarcharField(blank=True)
active_on_vlans = VarcharField(db_column='activeonvlans')
detention_type = VarcharField(db_column='detainmenttype',
choices=DETENTION_TYPE_CHOICES)
quarantine_vlan = models.ForeignKey('QuarantineVlan',
db_column='quarantineid', null=True)
def __unicode__(self):
return self.name
class Meta:
db_table = 'block'
ordering = ('name', )
|
GarySparrow/mFlaskWeb
|
refs/heads/master
|
venv/Lib/site-packages/pygments/lexers/x10.py
|
25
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.x10
~~~~~~~~~~~~~~~~~~~
Lexers for the X10 programming language.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
__all__ = ['X10Lexer']
class X10Lexer(RegexLexer):
"""
For the X10 language.
.. versionadded:: 0.1
"""
name = 'X10'
aliases = ['x10', 'xten']
filenames = ['*.x10']
mimetypes = ['text/x-x10']
keywords = (
'as', 'assert', 'async', 'at', 'athome', 'ateach', 'atomic',
'break', 'case', 'catch', 'class', 'clocked', 'continue',
'def', 'default', 'do', 'else', 'final', 'finally', 'finish',
'for', 'goto', 'haszero', 'here', 'if', 'import', 'in',
'instanceof', 'interface', 'isref', 'new', 'offer',
'operator', 'package', 'return', 'struct', 'switch', 'throw',
'try', 'type', 'val', 'var', 'when', 'while'
)
types = (
'void',
)
values = (
'false', 'null', 'self', 'super', 'this', 'true'
)
modifiers = (
'abstract', 'extends', 'implements', 'native', 'offers',
'private', 'property', 'protected', 'public', 'static',
'throws', 'transient'
)
tokens = {
'root': [
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*(.|\n)*?\*/', Comment.Multiline),
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
(r'\b(%s)\b' % '|'.join(types), Keyword.Type),
(r'\b(%s)\b' % '|'.join(values), Keyword.Constant),
(r'\b(%s)\b' % '|'.join(modifiers), Keyword.Declaration),
(r'"(\\\\|\\"|[^"])*"', String),
(r"'\\.'|'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char),
(r'.', Text)
],
}
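# A minimal usage sketch with the standard Pygments API (the code snippet
# being highlighted is illustrative):
#
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#   print(highlight('val x:Int = 1;', X10Lexer(), TerminalFormatter()))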
|
mmedal/Magic_the_Slackening
|
refs/heads/master
|
magicbot/views.py
|
2
|
import urllib
from bs4 import BeautifulSoup
import requests
from rest_framework.exceptions import ParseError
from rest_framework.response import Response
from rest_framework.views import APIView
from aliases import CARD_ALIASES, SET_ALIASES
GATHERER_URI = 'http://gatherer.wizards.com/Handlers/Image.ashx?type=card'
class WelcomeView(APIView):
"""This welcome view lets "Deploy to Heroku" users know that their deploy was successful."""
permission_classes = ()
def get(self, request):
return Response({
'text': 'Welcome to the magicbot API. Configure your Slack outgoing webhooks to make use of it!'
})
class MagicCardView(APIView):
"""Slack webhook interface for returning details of magic card."""
def post(self, request):
if 'text' not in request.data:
raise ParseError(detail='No query text was provided.')
command = request.data['text']
if command[:9] != 'magicbot:':
raise ParseError(detail='Text query must begin with "magicbot:".')
# Get set name first
set_code = ''
if '\\\\' in command:
csplit = command.split('\\\\')
set_name = csplit[1].strip(' ').lower()
set_code = SET_ALIASES.get(set_name, '')
command = csplit[0]
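# e.g. the query "magicbot: Lightning Bolt \\ magic 2010" yields card name
# "Lightning Bolt" and a set code looked up in SET_ALIASES (illustrative).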
# The 9: strips magicbot from the command
card_name = command.encode('utf-8')[9:].strip(' ')
# try to derive the card name from a fragment
cards_json = requests.get('http://gatherer.wizards.com/Handlers/InlineCardSearch.ashx?nameFragment=%s' % card_name).json()
if len(cards_json['Results']) > 0:
card_name = cards_json['Results'][0]['Name']
# Catch Slack's garbage \u2019 in the name of Manor Skeleton
try:
card_name = card_name.decode('utf-8').replace(u'\u2019', u'\'')
except Exception as e:
print e
# Assign aliases
if card_name.lower() in CARD_ALIASES:
card_name = CARD_ALIASES[card_name.lower()]
# Get card image uri
card_img_uri = '{}&name={}&set={}'.format(
GATHERER_URI, urllib.quote_plus(card_name), set_code)
# Get card price
set_code = set_code.upper()
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.0) Gecko/20100101 Firefox/14.0.1'}
redirected = False
base_uri = 'http://www.mtggoldfish.com'
query_uri = '{}/q?query_string={}&set_id={}'.format(base_uri, card_name, set_code)
r = requests.get(query_uri, headers=headers)
soup = BeautifulSoup(r.text, 'html.parser')
if card_name in soup.title.string.lower():
redirected = True
price_uri = ''
if not redirected:
def is_newline(iterable):
return iterable != '\n'
for result in soup.find_all('tr'):
row = filter(is_newline, result.contents)
card_parsed = filter(is_newline, row[0].contents)
set_parsed = filter(is_newline, row[1].contents)
if set_code:
if set_code == set_parsed[0]['alt']:
price_uri = '{}{}#paper'.format(base_uri, card_parsed[0]['href'])
break
else:
price_uri = '{}{}#paper'.format(base_uri, card_parsed[0]['href'])
break
if price_uri or redirected:
if not redirected:
r = requests.get(price_uri, headers=headers)
soup = BeautifulSoup(r.text, 'html.parser')
def is_price_field(tag):
if tag.has_attr('class'):
if tag['class'][0] == 'price-box-price':
return True
return False
try:
price = 'Current median price: ${}'.format(soup.find_all(is_price_field)[1].string)
except IndexError:
price = 'Current median price: ${}'.format(soup.find_all(is_price_field)[0].string)
else:
price = 'Current median price: ??'
print '{} {}'.format(card_img_uri, price)
return Response({
'text': '{} {}'.format(card_img_uri, price)
})
|
benoitsteiner/tensorflow-opencl
|
refs/heads/master
|
tensorflow/python/ops/distributions/bernoulli.py
|
37
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Bernoulli distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util as distribution_util
class Bernoulli(distribution.Distribution):
"""Bernoulli distribution.
The Bernoulli distribution with `probs` parameter, i.e., the probability of a
`1` outcome (vs a `0` outcome).
"""
def __init__(self,
logits=None,
probs=None,
dtype=dtypes.int32,
validate_args=False,
allow_nan_stats=True,
name="Bernoulli"):
"""Construct Bernoulli distributions.
Args:
logits: An N-D `Tensor` representing the log-odds of a `1` event. Each
entry in the `Tensor` parametrizes an independent Bernoulli distribution
where the probability of an event is sigmoid(logits). Only one of
`logits` or `probs` should be passed in.
probs: An N-D `Tensor` representing the probability of a `1`
event. Each entry in the `Tensor` parameterizes an independent
Bernoulli distribution. Only one of `logits` or `probs` should be passed
in.
dtype: The type of the event samples. Default: `int32`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: If p and logits are passed, or if neither are passed.
"""
parameters = locals()
with ops.name_scope(name):
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits=logits,
probs=probs,
validate_args=validate_args,
name=name)
super(Bernoulli, self).__init__(
dtype=dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._logits, self._probs],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return {"logits": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}
@property
def logits(self):
"""Log-odds of a `1` outcome (vs `0`)."""
return self._logits
@property
def probs(self):
"""Probability of a `1` outcome (vs `0`)."""
return self._probs
def _batch_shape_tensor(self):
return array_ops.shape(self._logits)
def _batch_shape(self):
return self._logits.get_shape()
def _event_shape_tensor(self):
return array_ops.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
new_shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
uniform = random_ops.random_uniform(
new_shape, seed=seed, dtype=self.probs.dtype)
sample = math_ops.less(uniform, self.probs)
return math_ops.cast(sample, self.dtype)
def _log_prob(self, event):
if self.validate_args:
event = distribution_util.embed_check_integer_casting_closed(
event, target_dtype=dtypes.bool)
# TODO(jaana): The current sigmoid_cross_entropy_with_logits has
# inconsistent behavior for logits = inf/-inf.
event = math_ops.cast(event, self.logits.dtype)
logits = self.logits
# sigmoid_cross_entropy_with_logits doesn't broadcast shape,
# so we do this here.
def _broadcast(logits, event):
return (array_ops.ones_like(event) * logits,
array_ops.ones_like(logits) * event)
# First check static shape.
if (event.get_shape().is_fully_defined() and
logits.get_shape().is_fully_defined()):
if event.get_shape() != logits.get_shape():
logits, event = _broadcast(logits, event)
else:
logits, event = control_flow_ops.cond(
distribution_util.same_dynamic_shape(logits, event),
lambda: (logits, event),
lambda: _broadcast(logits, event))
return -nn.sigmoid_cross_entropy_with_logits(labels=event, logits=logits)
def _prob(self, event):
return math_ops.exp(self._log_prob(event))
def _entropy(self):
return (-self.logits * (math_ops.sigmoid(self.logits) - 1) +
nn.softplus(-self.logits))
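# (Equivalent to -p*log(p) - (1 - p)*log(1 - p) with p = sigmoid(logits),
# using log(p) = -softplus(-logits) and softplus(x) = x + softplus(-x).)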
def _mean(self):
return array_ops.identity(self.probs)
def _variance(self):
return self._mean() * (1. - self.probs)
def _mode(self):
"""Returns `1` if `prob > 0.5` and `0` otherwise."""
return math_ops.cast(self.probs > 0.5, self.dtype)
class BernoulliWithSigmoidProbs(Bernoulli):
"""Bernoulli with `probs = nn.sigmoid(logits)`."""
def __init__(self,
logits=None,
dtype=dtypes.int32,
validate_args=False,
allow_nan_stats=True,
name="BernoulliWithSigmoidProbs"):
parameters = locals()
with ops.name_scope(name):
super(BernoulliWithSigmoidProbs, self).__init__(
probs=nn.sigmoid(logits, name="sigmoid_probs"),
dtype=dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
@kullback_leibler.RegisterKL(Bernoulli, Bernoulli)
def _kl_bernoulli_bernoulli(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b) with a and b Bernoulli.
Args:
a: instance of a Bernoulli distribution object.
b: instance of a Bernoulli distribution object.
name: (optional) Name to use for created operations.
default is "kl_bernoulli_bernoulli".
Returns:
Batchwise KL(a || b)
"""
with ops.name_scope(name, "kl_bernoulli_bernoulli",
values=[a.logits, b.logits]):
delta_probs0 = nn.softplus(-b.logits) - nn.softplus(-a.logits)
delta_probs1 = nn.softplus(b.logits) - nn.softplus(a.logits)
return (math_ops.sigmoid(a.logits) * delta_probs0
+ math_ops.sigmoid(-a.logits) * delta_probs1)
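# A minimal usage sketch of the class above (TF 1.x graph mode; the
# probability value is illustrative):
#
#   import tensorflow as tf
#   dist = Bernoulli(probs=0.3)
#   samples = dist.sample(5)           # int32 Tensor of 0s and 1s
#   log_probs = dist.log_prob([0, 1])  # elementwise log-probabilities
#   with tf.Session() as sess:
#       print(sess.run([samples, log_probs]))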
|
wkcn/mobula
|
refs/heads/master
|
mobula/__init__.py
|
1
|
from .Net import *
from .wrapper import *
|
direvus/ansible
|
refs/heads/devel
|
test/units/plugins/connection/test_network_cli.py
|
5
|
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from io import StringIO
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleConnectionFailure
from ansible.playbook.play_context import PlayContext
from ansible.plugins.loader import connection_loader
class TestConnectionClass(unittest.TestCase):
@patch("ansible.plugins.connection.paramiko_ssh.Connection._connect")
def test_network_cli__connect_error(self, mocked_super):
pc = PlayContext()
pc.network_os = 'ios'
conn = connection_loader.get('network_cli', pc, '/dev/null')
conn.ssh = MagicMock()
conn.receive = MagicMock()
conn._network_os = 'does not exist'
self.assertRaises(AnsibleConnectionFailure, conn._connect)
def test_network_cli__invalid_os(self):
pc = PlayContext()
pc.network_os = None
self.assertRaises(AnsibleConnectionFailure, connection_loader.get, 'network_cli', pc, '/dev/null')
@patch("ansible.plugins.connection.network_cli.terminal_loader")
@patch("ansible.plugins.connection.paramiko_ssh.Connection._connect")
def test_network_cli__connect(self, mocked_super, mocked_terminal_loader):
pc = PlayContext()
pc.network_os = 'ios'
conn = connection_loader.get('network_cli', pc, '/dev/null')
conn.ssh = MagicMock()
conn.receive = MagicMock()
conn._connect()
self.assertTrue(conn._terminal.on_open_shell.called)
self.assertFalse(conn._terminal.on_become.called)
conn._play_context.become = True
conn._play_context.become_method = 'enable'
conn._play_context.become_pass = 'password'
conn._connected = False
conn._connect()
conn._terminal.on_become.assert_called_with(passwd='password')
@patch("ansible.plugins.connection.paramiko_ssh.Connection.close")
def test_network_cli_close(self, mocked_super):
pc = PlayContext()
pc.network_os = 'ios'
conn = connection_loader.get('network_cli', pc, '/dev/null')
terminal = MagicMock(supports_multiplexing=False)
conn._terminal = terminal
conn._ssh_shell = MagicMock()
conn.paramiko_conn = MagicMock()
conn._connected = True
conn.close()
self.assertTrue(terminal.on_close_shell.called)
self.assertIsNone(conn._ssh_shell)
self.assertIsNone(conn.paramiko_conn)
@patch("ansible.plugins.connection.paramiko_ssh.Connection._connect")
def test_network_cli_exec_command(self, mocked_super):
pc = PlayContext()
pc.network_os = 'ios'
conn = connection_loader.get('network_cli', pc, '/dev/null')
mock_send = MagicMock(return_value=b'command response')
conn.send = mock_send
conn._ssh_shell = MagicMock()
# test sending a single command and converting to dict
out = conn.exec_command('command')
self.assertEqual(out, b'command response')
mock_send.assert_called_with(command=b'command')
# test sending a json string
out = conn.exec_command(json.dumps({'command': 'command'}))
self.assertEqual(out, b'command response')
mock_send.assert_called_with(command=b'command')
def test_network_cli_send(self):
pc = PlayContext()
pc.network_os = 'ios'
conn = connection_loader.get('network_cli', pc, '/dev/null')
mock__terminal = MagicMock()
mock__terminal.terminal_stdout_re = [re.compile(b'device#')]
mock__terminal.terminal_stderr_re = [re.compile(b'^ERROR')]
conn._terminal = mock__terminal
mock__shell = MagicMock()
conn._ssh_shell = mock__shell
response = b"""device#command
command response
device#
"""
mock__shell.recv.return_value = response
output = conn.send(b'command', None, None, None)
mock__shell.sendall.assert_called_with(b'command\r')
self.assertEqual(output, 'command response')
mock__shell.reset_mock()
mock__shell.recv.return_value = b"ERROR: error message device#"
with self.assertRaises(AnsibleConnectionFailure) as exc:
conn.send(b'command', None, None, None)
self.assertEqual(str(exc.exception), 'ERROR: error message device#')
|
Houzz/annoy2
|
refs/heads/master
|
test/b_test.py
|
1
|
# Copyright (c) 2013 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# TeamCity fails to run this test because it can't import the C++ module.
# I think it's because the C++ part gets built in another directory.
import unittest
import random
import numpy
from annoy import AnnoyIndex
import os
try:
xrange
except NameError:
# Python 3 compat
xrange = range
class TestCase(unittest.TestCase):
def assertAlmostEqual(self, x, y):
# Annoy uses float precision, so we override the default precision;
# the tests below call assertAlmostEqual, so that name is overridden.
super(TestCase, self).assertAlmostEqual(x, y, 4)
class AngularIndexTest(TestCase):
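# NOTE: methods named "t1est_*" rather than "test_*" are not collected by
# unittest discovery; this looks like a deliberate way of disabling them.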
def t1est_dist(self):
os.system("rm -rf test_db")
os.system("mkdir test_db")
f = 2
i = AnnoyIndex(f, 2, "test_db", 64, 1000, 3048576000)
print "creating object"
i.add_item(0, [0, 1])
i.add_item(1, [1, 1])
print "creating object"
self.assertAlmostEqual(i.get_distance(0, 1), 2 * (1.0 - 2 ** -0.5))
print "done"
def t1est_dist_2(self):
os.system("rm -rf test_db")
os.system("mkdir test_db")
f = 2
i = AnnoyIndex(f, 2, "test_db", 64, 1000, 3048576000)
i.add_item(0, [1000, 0])
i.add_item(1, [10, 0])
self.assertAlmostEqual(i.get_distance(0, 1), 0)
def t1est_dist_3(self):
os.system("rm -rf test_db")
os.system("mkdir test_db")
f = 2
i = AnnoyIndex(f, 2, "test_db", 64, 1000, 3048576000)
i.add_item(0, [97, 0])
i.add_item(1, [42, 42])
dist = (1 - 2 ** -0.5) ** 2 + (2 ** -0.5) ** 2
self.assertAlmostEqual(i.get_distance(0, 1), dist)
def t1est_dist_degen(self):
os.system("rm -rf test_db")
os.system("mkdir test_db")
f = 2
i = AnnoyIndex(f, 2, "test_db", 64, 1000, 3048576000)
i.add_item(0, [1, 0])
i.add_item(1, [0, 0])
self.assertAlmostEqual(i.get_distance(0, 1), 2.0)
def test_get_nns_by_vector(self):
print "test_get_nns_by_vector "
os.system("rm -rf test_db")
os.system("mkdir test_db")
f = 3
i = AnnoyIndex(f, 3, "test_db", 10, 1000, 3048576000)
i.add_item(0, [0, 0, 1])
i.add_item(1, [0, 1, 0])
i.add_item(2, [1, 0, 0])
self.assertEqual(i.get_nns_by_vector([3, 2, 1], 3), [2, 1, 0])
self.assertEqual(i.get_nns_by_vector([1, 2, 3], 3), [0, 1, 2])
self.assertEqual(i.get_nns_by_vector([2, 0, 1], 3), [2, 0, 1])
def t1est_get_nns_by_item(self):
print "test_get_nns_by_item "
os.system("rm -rf test_db")
os.system("mkdir test_db")
f = 3
i = AnnoyIndex(f, 3, "test_db", 10, 1000, 3048576000)
i.add_item(0, [2, 1, 0])
i.add_item(1, [1, 2, 0])
i.add_item(2, [0, 0, 1])
self.assertEqual(i.get_nns_by_item(0, 3), [0, 1, 2])
self.assertEqual(i.get_nns_by_item(1, 3), [1, 0, 2])
self.assertTrue(i.get_nns_by_item(2, 3) in [[2, 0, 1], [2, 1, 0]]) # could be either
def t1est_large_index(self):
os.system("rm -rf test_db")
os.system("mkdir test_db")
# Generate pairs of random points where the pair is super close
f = 10
i = AnnoyIndex(f, 10, "test_db", 10, 1000, 3048576000)
for j in xrange(0, 10000, 2):
p = [random.gauss(0, 1) for z in xrange(f)]
f1 = random.random() + 1
f2 = random.random() + 1
x = [f1 * pi + random.gauss(0, 1e-2) for pi in p]
y = [f2 * pi + random.gauss(0, 1e-2) for pi in p]
i.add_item(j, x)
i.add_item(j+1, y)
for j in xrange(0, 10000, 2):
self.assertEqual(i.get_nns_by_item(j, 2), [j, j+1])
self.assertEqual(i.get_nns_by_item(j+1, 2), [j+1, j])
if __name__ == '__main__':
unittest.main()
|
aristanetworks/arista-ovs-nova
|
refs/heads/master
|
nova/api/openstack/compute/ips.py
|
4
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
import nova
from nova.api.openstack import common
from nova.api.openstack.compute.views import addresses as view_addresses
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def make_network(elem):
elem.set('id', 0)
ip = xmlutil.SubTemplateElement(elem, 'ip', selector=1)
ip.set('version')
ip.set('addr')
network_nsmap = {None: xmlutil.XMLNS_V11}
class NetworkTemplate(xmlutil.TemplateBuilder):
def construct(self):
sel = xmlutil.Selector(xmlutil.get_items, 0)
root = xmlutil.TemplateElement('network', selector=sel)
make_network(root)
return xmlutil.MasterTemplate(root, 1, nsmap=network_nsmap)
class AddressesTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('addresses', selector='addresses')
elem = xmlutil.SubTemplateElement(root, 'network',
selector=xmlutil.get_items)
make_network(elem)
return xmlutil.MasterTemplate(root, 1, nsmap=network_nsmap)
class Controller(wsgi.Controller):
"""The servers addresses API controller for the OpenStack API."""
_view_builder_class = view_addresses.ViewBuilder
def __init__(self, **kwargs):
super(Controller, self).__init__(**kwargs)
self._compute_api = nova.compute.API()
def _get_instance(self, context, server_id):
try:
instance = self._compute_api.get(context, server_id)
except nova.exception.NotFound:
msg = _("Instance does not exist")
raise exc.HTTPNotFound(explanation=msg)
return instance
def create(self, req, server_id, body):
raise exc.HTTPNotImplemented()
def delete(self, req, server_id, id):
raise exc.HTTPNotImplemented()
@wsgi.serializers(xml=AddressesTemplate)
def index(self, req, server_id):
context = req.environ["nova.context"]
instance = self._get_instance(context, server_id)
networks = common.get_networks_for_instance(context, instance)
return self._view_builder.index(networks)
@wsgi.serializers(xml=NetworkTemplate)
def show(self, req, server_id, id):
context = req.environ["nova.context"]
instance = self._get_instance(context, server_id)
networks = common.get_networks_for_instance(context, instance)
if id not in networks:
msg = _("Instance is not a member of specified network")
raise exc.HTTPNotFound(explanation=msg)
return self._view_builder.show(networks[id], id)
def create_resource():
return wsgi.Resource(Controller())
|
luillyfe/GAE
|
refs/heads/master
|
models.py
|
1
|
# -*- coding: utf-8 -*-
# Here live the models the application uses to
# support each of its features
from google.appengine.ext import ndb
from google.appengine.api import users
class UserApp( ndb.Model ):
"""Modela un usuario de la aplicación."""
user = ndb.UserProperty()
name = ndb.StringProperty()
lastname = ndb.StringProperty()
class Greeting(ndb.Model):
"""Modela una entrada individual en el libro de invitados."""
author = ndb.UserProperty()
content = ndb.StringProperty(indexed=False)
date = ndb.DateTimeProperty(auto_now_add=True)
|
gocardless/gocardless-pro-python
|
refs/heads/master
|
tests/integration/payout_items_integration_test.py
|
1
|
# WARNING: Do not edit by hand, this file was generated by Crank:
#
# https://github.com/gocardless/crank
#
import json
import requests
import responses
from nose.tools import (
assert_equal,
assert_is_instance,
assert_is_none,
assert_is_not_none,
assert_not_equal,
assert_raises
)
from gocardless_pro.errors import MalformedResponseError
from gocardless_pro import resources
from gocardless_pro import list_response
from .. import helpers
@responses.activate
def test_payout_items_list():
fixture = helpers.load_fixture('payout_items')['list']
helpers.stub_response(fixture)
response = helpers.client.payout_items.list(*fixture['url_params'])
body = fixture['body']['payout_items']
assert_is_instance(response, list_response.ListResponse)
assert_is_instance(response.records[0], resources.PayoutItem)
assert_equal(response.before, fixture['body']['meta']['cursors']['before'])
assert_equal(response.after, fixture['body']['meta']['cursors']['after'])
assert_is_none(responses.calls[-1].request.headers.get('Idempotency-Key'))
assert_equal([r.amount for r in response.records],
[b.get('amount') for b in body])
assert_equal([r.taxes for r in response.records],
[b.get('taxes') for b in body])
assert_equal([r.type for r in response.records],
[b.get('type') for b in body])
@responses.activate
def test_timeout_payout_items_list_retries():
fixture = helpers.load_fixture('payout_items')['list']
with helpers.stub_timeout_then_response(fixture) as rsps:
response = helpers.client.payout_items.list(*fixture['url_params'])
assert_equal(2, len(rsps.calls))
assert_equal(rsps.calls[0].request.headers.get('Idempotency-Key'),
rsps.calls[1].request.headers.get('Idempotency-Key'))
body = fixture['body']['payout_items']
assert_is_instance(response, list_response.ListResponse)
assert_is_instance(response.records[0], resources.PayoutItem)
assert_equal(response.before, fixture['body']['meta']['cursors']['before'])
assert_equal(response.after, fixture['body']['meta']['cursors']['after'])
def test_502_payout_items_list_retries():
fixture = helpers.load_fixture('payout_items')['list']
with helpers.stub_502_then_response(fixture) as rsps:
response = helpers.client.payout_items.list(*fixture['url_params'])
assert_equal(2, len(rsps.calls))
assert_equal(rsps.calls[0].request.headers.get('Idempotency-Key'),
rsps.calls[1].request.headers.get('Idempotency-Key'))
body = fixture['body']['payout_items']
assert_is_instance(response, list_response.ListResponse)
assert_is_instance(response.records[0], resources.PayoutItem)
assert_equal(response.before, fixture['body']['meta']['cursors']['before'])
assert_equal(response.after, fixture['body']['meta']['cursors']['after'])
@responses.activate
def test_payout_items_all():
fixture = helpers.load_fixture('payout_items')['list']
def callback(request):
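# Simulate two pages: the first response advances the cursor to '123';
# the request made with after=123 then returns a final page (after=None).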
if 'after=123' in request.url:
fixture['body']['meta']['cursors']['after'] = None
else:
fixture['body']['meta']['cursors']['after'] = '123'
return [200, {}, json.dumps(fixture['body'])]
url = 'http://example.com' + fixture['path_template']
responses.add_callback(fixture['method'], url, callback)
all_records = list(helpers.client.payout_items.all())
assert_equal(len(all_records), len(fixture['body']['payout_items']) * 2)
for record in all_records:
assert_is_instance(record, resources.PayoutItem)
|
ehsangolshani/crazy-hamster
|
refs/heads/master
|
.venv/lib/python3.5/site-packages/pip-9.0.1-py3.5.egg/pip/_vendor/requests/certs.py
|
516
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
requests.certs
~~~~~~~~~~~~~~
This module returns the preferred default CA certificate bundle.
If you are packaging Requests, e.g., for a Linux distribution or a managed
environment, you can change the definition of where() to return a separately
packaged CA bundle.
"""
import os.path
try:
from certifi import where
except ImportError:
def where():
"""Return the preferred certificate bundle."""
# vendored bundle inside Requests
return os.path.join(os.path.dirname(__file__), 'cacert.pem')
if __name__ == '__main__':
print(where())
|
duqiao/django
|
refs/heads/master
|
tests/m2m_signals/models.py
|
448
|
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Part(models.Model):
name = models.CharField(max_length=20)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Car(models.Model):
name = models.CharField(max_length=20)
default_parts = models.ManyToManyField(Part)
optional_parts = models.ManyToManyField(Part, related_name='cars_optional')
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class SportsCar(Car):
price = models.IntegerField()
@python_2_unicode_compatible
class Person(models.Model):
name = models.CharField(max_length=20)
fans = models.ManyToManyField('self', related_name='idols', symmetrical=False)
friends = models.ManyToManyField('self')
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
|
ehogan/iris
|
refs/heads/master
|
lib/iris/tests/experimental/__init__.py
|
17
|
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Experimental code is tested in this package.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
|
edallison/oppia
|
refs/heads/develop
|
core/storage/email/gae_models_test.py
|
1
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import types
from core.platform import models
from core.tests import test_utils
import feconf
(email_models,) = models.Registry.import_models([models.NAMES.email])
class SentEmailModelUnitTests(test_utils.GenericTestBase):
"""Test the SentEmailModel class."""
def setUp(self):
super(SentEmailModelUnitTests, self).setUp()
# pylint: disable=unused-argument
def _generate_hash_for_tests(
cls, recipient_id, email_subject, email_body):
return 'Email Hash'
self.generate_constant_hash_ctx = self.swap(
email_models.SentEmailModel, '_generate_hash',
types.MethodType(_generate_hash_for_tests,
email_models.SentEmailModel))
def test_saved_model_can_be_retrieved_with_same_hash(self):
with self.generate_constant_hash_ctx:
email_models.SentEmailModel.create(
'recipient_id', 'recipient@email.com', 'sender_id',
'sender@email.com', feconf.EMAIL_INTENT_SIGNUP,
'Email Subject', 'Email Body', datetime.datetime.utcnow())
query = email_models.SentEmailModel.query()
query = query.filter(
email_models.SentEmailModel.email_hash == 'Email Hash')
results = query.fetch(2)
self.assertEqual(len(results), 1)
query = email_models.SentEmailModel.query()
query = query.filter(
email_models.SentEmailModel.email_hash == 'Bad Email Hash')
results = query.fetch(2)
self.assertEqual(len(results), 0)
def test_get_by_hash_works_correctly(self):
with self.generate_constant_hash_ctx:
email_models.SentEmailModel.create(
'recipient_id', 'recipient@email.com', 'sender_id',
'sender@email.com', feconf.EMAIL_INTENT_SIGNUP,
'Email Subject', 'Email Body', datetime.datetime.utcnow())
results = email_models.SentEmailModel.get_by_hash('Email Hash')
self.assertEqual(len(results), 1)
results = email_models.SentEmailModel.get_by_hash('Bad Email Hash')
self.assertEqual(len(results), 0)
def test_get_by_hash_returns_multiple_models_with_same_hash(self):
with self.generate_constant_hash_ctx:
email_models.SentEmailModel.create(
'recipient_id', 'recipient@email.com', 'sender_id',
'sender@email.com', feconf.EMAIL_INTENT_SIGNUP,
'Email Subject', 'Email Body', datetime.datetime.utcnow())
email_models.SentEmailModel.create(
'recipient_id', 'recipient@email.com', 'sender_id',
'sender@email.com', feconf.EMAIL_INTENT_SIGNUP,
'Email Subject', 'Email Body', datetime.datetime.utcnow())
results = email_models.SentEmailModel.get_by_hash('Email Hash')
self.assertEqual(len(results), 2)
def test_get_by_hash_behavior_with_sent_datetime_lower_bound(self):
with self.generate_constant_hash_ctx:
time_now = datetime.datetime.utcnow()
email_models.SentEmailModel.create(
'recipient_id', 'recipient@email.com', 'sender_id',
'sender@email.com', feconf.EMAIL_INTENT_SIGNUP,
'Email Subject', 'Email Body', datetime.datetime.utcnow())
results = email_models.SentEmailModel.get_by_hash(
'Email Hash', sent_datetime_lower_bound=time_now)
self.assertEqual(len(results), 1)
time_now1 = datetime.datetime.utcnow()
results = email_models.SentEmailModel.get_by_hash(
'Email Hash', sent_datetime_lower_bound=time_now1)
self.assertEqual(len(results), 0)
time_before = (datetime.datetime.utcnow() -
datetime.timedelta(minutes=10))
results = email_models.SentEmailModel.get_by_hash(
'Email Hash', sent_datetime_lower_bound=time_before)
self.assertEqual(len(results), 1)
# Check that it accepts only DateTime objects.
with self.assertRaises(Exception):
results = email_models.SentEmailModel.get_by_hash(
'Email Hash',
sent_datetime_lower_bound='Not a datetime object')
class GenerateHashTests(test_utils.GenericTestBase):
"""Test that generating hash functionality works as expected."""
def test_same_inputs_always_gives_same_hashes(self):
# pylint: disable=protected-access
email_hash1 = email_models.SentEmailModel._generate_hash(
'recipient_id', 'email_subject', 'email_html_body')
email_hash2 = email_models.SentEmailModel._generate_hash(
'recipient_id', 'email_subject', 'email_html_body')
self.assertEqual(email_hash1, email_hash2)
# pylint: enable=protected-access
def test_different_inputs_give_different_hashes(self):
# pylint: disable=protected-access
email_hash1 = email_models.SentEmailModel._generate_hash(
'recipient_id', 'email_subject', 'email_html_body')
email_hash2 = email_models.SentEmailModel._generate_hash(
'recipient_id', 'email_subject', 'email_html_body2')
self.assertNotEqual(email_hash1, email_hash2)
email_hash2 = email_models.SentEmailModel._generate_hash(
'recipient_id2', 'email_subject', 'email_html_body')
self.assertNotEqual(email_hash1, email_hash2)
email_hash2 = email_models.SentEmailModel._generate_hash(
'recipient_id', 'email_subject2', 'email_html_body')
self.assertNotEqual(email_hash1, email_hash2)
email_hash2 = email_models.SentEmailModel._generate_hash(
'recipient_id2', 'email_subject2', 'email_html_body2')
self.assertNotEqual(email_hash1, email_hash2)
# pylint: enable=protected-access
class BulkEmailModelsTest(test_utils.GenericTestBase):
def test_that_number_of_emails_sent_to_user_is_correct(self):
recipient_ids = ['recipient1', 'recipient2']
email_models.BulkEmailModel.create(
'id', recipient_ids, 'sender', 'sender@example.com',
feconf.BULK_EMAIL_INTENT_IMPROVE_EXPLORATION, 'subject', 'body',
datetime.datetime.utcnow())
messages1 = (
email_models.BulkEmailModel.get_number_of_emails_sent_to_user(
'recipient1', feconf.BULK_EMAIL_INTENT_IMPROVE_EXPLORATION))
self.assertEqual(messages1, 1)
messages2 = (
email_models.BulkEmailModel.get_number_of_emails_sent_to_user(
'recipient2', feconf.BULK_EMAIL_INTENT_IMPROVE_EXPLORATION))
self.assertEqual(messages2, 1)
messages3 = (
email_models.BulkEmailModel.get_number_of_emails_sent_to_user(
'recipient1', feconf.BULK_EMAIL_INTENT_MARKETING))
self.assertEqual(messages3, 0)
messages4 = (
email_models.BulkEmailModel.get_number_of_emails_sent_to_user(
'recipient2', feconf.BULK_EMAIL_INTENT_MARKETING))
self.assertEqual(messages4, 0)
messages5 = (
email_models.BulkEmailModel.get_number_of_emails_sent_to_user(
'recipient3', feconf.BULK_EMAIL_INTENT_MARKETING))
self.assertEqual(messages5, 0)
messages6 = (
email_models.BulkEmailModel.get_number_of_emails_sent_to_user(
'recipient3', feconf.BULK_EMAIL_INTENT_IMPROVE_EXPLORATION))
self.assertEqual(messages6, 0)
|
pczerkas/tempest
|
refs/heads/master
|
tempest/services/identity/v3/json/credentials_client.py
|
8
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from tempest.common import service_client
class CredentialsClient(service_client.ServiceClient):
api_version = "v3"
def create_credential(self, access_key, secret_key, user_id, project_id):
"""Creates a credential."""
blob = "{\"access\": \"%s\", \"secret\": \"%s\"}" % (
access_key, secret_key)
post_body = {
"blob": blob,
"project_id": project_id,
"type": "ec2",
"user_id": user_id
}
post_body = json.dumps({'credential': post_body})
resp, body = self.post('credentials', post_body)
self.expected_success(201, resp.status)
body = json.loads(body)
body['credential']['blob'] = json.loads(body['credential']['blob'])
return service_client.ResponseBody(resp, body)
def update_credential(self, credential_id, **kwargs):
"""Updates a credential."""
body = self.get_credential(credential_id)['credential']
cred_type = kwargs.get('type', body['type'])
access_key = kwargs.get('access_key', body['blob']['access'])
secret_key = kwargs.get('secret_key', body['blob']['secret'])
project_id = kwargs.get('project_id', body['project_id'])
user_id = kwargs.get('user_id', body['user_id'])
blob = "{\"access\": \"%s\", \"secret\": \"%s\"}" % (
access_key, secret_key)
post_body = {
"blob": blob,
"project_id": project_id,
"type": cred_type,
"user_id": user_id
}
post_body = json.dumps({'credential': post_body})
resp, body = self.patch('credentials/%s' % credential_id, post_body)
self.expected_success(200, resp.status)
body = json.loads(body)
body['credential']['blob'] = json.loads(body['credential']['blob'])
return service_client.ResponseBody(resp, body)
def get_credential(self, credential_id):
"""To GET Details of a credential."""
resp, body = self.get('credentials/%s' % credential_id)
self.expected_success(200, resp.status)
body = json.loads(body)
body['credential']['blob'] = json.loads(body['credential']['blob'])
return service_client.ResponseBody(resp, body)
def list_credentials(self):
"""Lists out all the available credentials."""
resp, body = self.get('credentials')
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def delete_credential(self, credential_id):
"""Deletes a credential."""
resp, body = self.delete('credentials/%s' % credential_id)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp, body)
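# A minimal usage sketch (hypothetical arguments; in tempest the client is
# normally constructed by the client manager with real auth and endpoint
# settings):
#
#   client = CredentialsClient(auth_provider, 'identity', 'regionOne')
#   cred = client.create_credential('ak', 'sk', user_id, project_id)
#   client.delete_credential(cred['credential']['id'])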
|
enthought/etsproxy
|
refs/heads/master
|
enthought/traits/ui/wx/list_editor.py
|
1
|
# proxy module
from traitsui.wx.list_editor import *
|
DR08/mxnet
|
refs/heads/stable
|
example/warpctc/lstm_model.py
|
28
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C0111,too-many-arguments,too-many-instance-attributes,too-many-locals,redefined-outer-name,fixme
# pylint: disable=superfluous-parens, no-member, invalid-name
import sys
sys.path.insert(0, "../../python")
import numpy as np
import mxnet as mx
from lstm import LSTMState, LSTMParam, lstm, lstm_inference_symbol
class LSTMInferenceModel(object):
def __init__(self,
num_lstm_layer,
seq_len,
num_hidden,
num_label,
arg_params,
data_size,
ctx=mx.cpu()):
self.sym = lstm_inference_symbol(num_lstm_layer,
seq_len,
num_hidden,
num_label)
batch_size = 1
init_c = [('l%d_init_c'%l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
init_h = [('l%d_init_h'%l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
data_shape = [("data", (batch_size, data_size))]
input_shapes = dict(init_c + init_h + data_shape)
self.executor = self.sym.simple_bind(ctx=ctx, **input_shapes)
for key in self.executor.arg_dict.keys():
if key in arg_params:
arg_params[key].copyto(self.executor.arg_dict[key])
state_name = []
for i in range(num_lstm_layer):
state_name.append("l%d_init_c" % i)
state_name.append("l%d_init_h" % i)
self.states_dict = dict(zip(state_name, self.executor.outputs[1:]))
self.input_arr = mx.nd.zeros(data_shape[0][1])
def forward(self, input_data, new_seq=False):
        if new_seq:
for key in self.states_dict.keys():
self.executor.arg_dict[key][:] = 0.
input_data.copyto(self.executor.arg_dict["data"])
self.executor.forward()
for key in self.states_dict.keys():
self.states_dict[key].copyto(self.executor.arg_dict[key])
prob = self.executor.outputs[0].asnumpy()
return prob
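# A minimal usage sketch (hypothetical shapes; arg_params would come from a
# trained checkpoint, e.g. mx.model.load_checkpoint):
#
#   model = LSTMInferenceModel(num_lstm_layer=2, seq_len=80, num_hidden=100,
#                              num_label=11, arg_params=arg_params,
#                              data_size=80, ctx=mx.cpu())
#   prob = model.forward(mx.nd.array(frame_vector), new_seq=True)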
|
sdcooke/django
|
refs/heads/master
|
django/contrib/gis/gdal/prototypes/geom.py
|
450
|
from ctypes import POINTER, c_char_p, c_double, c_int, c_void_p
from django.contrib.gis.gdal.envelope import OGREnvelope
from django.contrib.gis.gdal.libgdal import lgdal
from django.contrib.gis.gdal.prototypes.errcheck import check_envelope
from django.contrib.gis.gdal.prototypes.generation import (
const_string_output, double_output, geom_output, int_output, srs_output,
string_output, void_output,
)
# ### Generation routines specific to this module ###
def env_func(f, argtypes):
"For getting OGREnvelopes."
f.argtypes = argtypes
f.restype = None
f.errcheck = check_envelope
return f
def pnt_func(f):
"For accessing point information."
return double_output(f, [c_void_p, c_int])
def topology_func(f):
f.argtypes = [c_void_p, c_void_p]
f.restype = c_int
    f.errcheck = bool
return f
# ### OGR_G ctypes function prototypes ###
# GeoJSON routines.
from_json = geom_output(lgdal.OGR_G_CreateGeometryFromJson, [c_char_p])
to_json = string_output(lgdal.OGR_G_ExportToJson, [c_void_p], str_result=True, decoding='ascii')
to_kml = string_output(lgdal.OGR_G_ExportToKML, [c_void_p, c_char_p], str_result=True, decoding='ascii')
# GetX, GetY, GetZ all return doubles.
getx = pnt_func(lgdal.OGR_G_GetX)
gety = pnt_func(lgdal.OGR_G_GetY)
getz = pnt_func(lgdal.OGR_G_GetZ)
# Geometry creation routines.
from_wkb = geom_output(lgdal.OGR_G_CreateFromWkb, [c_char_p, c_void_p, POINTER(c_void_p), c_int], offset=-2)
from_wkt = geom_output(lgdal.OGR_G_CreateFromWkt, [POINTER(c_char_p), c_void_p, POINTER(c_void_p)], offset=-1)
create_geom = geom_output(lgdal.OGR_G_CreateGeometry, [c_int])
clone_geom = geom_output(lgdal.OGR_G_Clone, [c_void_p])
get_geom_ref = geom_output(lgdal.OGR_G_GetGeometryRef, [c_void_p, c_int])
get_boundary = geom_output(lgdal.OGR_G_GetBoundary, [c_void_p])
geom_convex_hull = geom_output(lgdal.OGR_G_ConvexHull, [c_void_p])
geom_diff = geom_output(lgdal.OGR_G_Difference, [c_void_p, c_void_p])
geom_intersection = geom_output(lgdal.OGR_G_Intersection, [c_void_p, c_void_p])
geom_sym_diff = geom_output(lgdal.OGR_G_SymmetricDifference, [c_void_p, c_void_p])
geom_union = geom_output(lgdal.OGR_G_Union, [c_void_p, c_void_p])
# Geometry modification routines.
add_geom = void_output(lgdal.OGR_G_AddGeometry, [c_void_p, c_void_p])
import_wkt = void_output(lgdal.OGR_G_ImportFromWkt, [c_void_p, POINTER(c_char_p)])
# Destroys a geometry
destroy_geom = void_output(lgdal.OGR_G_DestroyGeometry, [c_void_p], errcheck=False)
# Geometry export routines.
to_wkb = void_output(lgdal.OGR_G_ExportToWkb, None, errcheck=True) # special handling for WKB.
to_wkt = string_output(lgdal.OGR_G_ExportToWkt, [c_void_p, POINTER(c_char_p)], decoding='ascii')
to_gml = string_output(lgdal.OGR_G_ExportToGML, [c_void_p], str_result=True, decoding='ascii')
get_wkbsize = int_output(lgdal.OGR_G_WkbSize, [c_void_p])
# Geometry spatial-reference related routines.
assign_srs = void_output(lgdal.OGR_G_AssignSpatialReference, [c_void_p, c_void_p], errcheck=False)
get_geom_srs = srs_output(lgdal.OGR_G_GetSpatialReference, [c_void_p])
# Geometry properties
get_area = double_output(lgdal.OGR_G_GetArea, [c_void_p])
get_centroid = void_output(lgdal.OGR_G_Centroid, [c_void_p, c_void_p])
get_dims = int_output(lgdal.OGR_G_GetDimension, [c_void_p])
get_coord_dim = int_output(lgdal.OGR_G_GetCoordinateDimension, [c_void_p])
set_coord_dim = void_output(lgdal.OGR_G_SetCoordinateDimension, [c_void_p, c_int], errcheck=False)
get_geom_count = int_output(lgdal.OGR_G_GetGeometryCount, [c_void_p])
get_geom_name = const_string_output(lgdal.OGR_G_GetGeometryName, [c_void_p], decoding='ascii')
get_geom_type = int_output(lgdal.OGR_G_GetGeometryType, [c_void_p])
get_point_count = int_output(lgdal.OGR_G_GetPointCount, [c_void_p])
get_point = void_output(lgdal.OGR_G_GetPoint,
[c_void_p, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)], errcheck=False
)
geom_close_rings = void_output(lgdal.OGR_G_CloseRings, [c_void_p], errcheck=False)
# Topology routines.
ogr_contains = topology_func(lgdal.OGR_G_Contains)
ogr_crosses = topology_func(lgdal.OGR_G_Crosses)
ogr_disjoint = topology_func(lgdal.OGR_G_Disjoint)
ogr_equals = topology_func(lgdal.OGR_G_Equals)
ogr_intersects = topology_func(lgdal.OGR_G_Intersects)
ogr_overlaps = topology_func(lgdal.OGR_G_Overlaps)
ogr_touches = topology_func(lgdal.OGR_G_Touches)
ogr_within = topology_func(lgdal.OGR_G_Within)
# Transformation routines.
geom_transform = void_output(lgdal.OGR_G_Transform, [c_void_p, c_void_p])
geom_transform_to = void_output(lgdal.OGR_G_TransformTo, [c_void_p, c_void_p])
# For retrieving the envelope of the geometry.
get_envelope = env_func(lgdal.OGR_G_GetEnvelope, [c_void_p, POINTER(OGREnvelope)])
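# A minimal usage sketch (internal API; in practice these prototypes are
# called only by django.contrib.gis.gdal.geometries.OGRGeometry):
#
#   from ctypes import byref
#   geom = from_wkt(byref(c_char_p(b'POINT (5 23)')), None, byref(c_void_p()))
#   x, y = getx(geom, 0), gety(geom, 0)
#   destroy_geom(geom)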
|
Geode/geonode
|
refs/heads/master
|
geonode/services/admin.py
|
7
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.contrib import admin
from geonode.services.models import Service
from geonode.base.admin import ResourceBaseAdminForm
class ServiceAdminForm(ResourceBaseAdminForm):
class Meta:
model = Service
fields = '__all__'
class ServiceAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'type', 'method')
list_display_links = ('id', 'name', )
list_filter = ('type', 'method')
form = ServiceAdminForm
admin.site.register(Service, ServiceAdmin)
|
jctanner/ansible
|
refs/heads/devel
|
test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg_with_init/mod_in_subpkg_with_init.py
|
35
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
def thingtocall():
return "thingtocall in mod_in_subpkg_with_init"
|
IndraVikas/scikit-learn
|
refs/heads/master
|
examples/covariance/plot_covariance_estimation.py
|
250
|
"""
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# Compute the negative log-likelihood under the ground-truth model, which we
# would not have access to in real settings.
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
|
AmrnotAmr/zato
|
refs/heads/master
|
code/zato-server/src/zato/server/service/internal/security/wss.py
|
6
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2010 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
from contextlib import closing
from traceback import format_exc
from uuid import uuid4
# Zato
from zato.common import SEC_DEF_TYPE
from zato.common.broker_message import SECURITY
from zato.common.odb.model import Cluster, WSSDefinition
from zato.common.odb.query import wss_list
from zato.server.service import Boolean, Integer
from zato.server.service.internal import AdminService, AdminSIO, ChangePasswordBase
class GetList(AdminService):
""" Returns a list of WS-Security definitions available.
"""
class SimpleIO(AdminSIO):
request_elem = 'zato_security_wss_get_list_request'
response_elem = 'zato_security_wss_get_list_response'
input_required = ('cluster_id',)
output_required = ('id', 'name', 'is_active', 'password_type', 'username',
Boolean('reject_empty_nonce_creat'), Boolean('reject_stale_tokens'), Integer('reject_expiry_limit'),
Integer('nonce_freshness_time'))
def get_data(self, session):
return wss_list(session, self.request.input.cluster_id, False)
def handle(self):
with closing(self.odb.session()) as session:
self.response.payload[:] = self.get_data(session)
class Create(AdminService):
""" Creates a new WS-Security definition.
"""
class SimpleIO(AdminSIO):
request_elem = 'zato_security_wss_create_request'
response_elem = 'zato_security_wss_create_response'
input_required = ('cluster_id', 'name', 'is_active', 'username',
'password_type', Boolean('reject_empty_nonce_creat'), Boolean('reject_stale_tokens'),
Integer('reject_expiry_limit'), Integer('nonce_freshness_time'))
output_required = ('id', 'name')
def handle(self):
input = self.request.input
with closing(self.odb.session()) as session:
cluster = session.query(Cluster).filter_by(id=input.cluster_id).first()
# Let's see if we already have a definition of that name before committing
# any stuff into the database.
existing_one = session.query(WSSDefinition).\
filter(Cluster.id==input.cluster_id).\
filter(WSSDefinition.name==input.name).first()
if existing_one:
raise Exception('WS-Security definition [{0}] already exists on this cluster'.format(input.name))
password = uuid4().hex
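            # 'password' is a random initial secret; it is published to the
            # cluster below and can be replaced later via the ChangePassword
            # service.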
try:
wss = WSSDefinition(
None, input.name, input.is_active, input.username,
password, input.password_type, input.reject_empty_nonce_creat,
input.reject_stale_tokens, input.reject_expiry_limit, input.nonce_freshness_time,
cluster)
session.add(wss)
session.commit()
            except Exception:
                msg = "Could not create a WS-Security definition, e:[{e}]".format(e=format_exc())
self.logger.error(msg)
session.rollback()
raise
else:
input.action = SECURITY.WSS_CREATE.value
input.password = password
input.sec_type = SEC_DEF_TYPE.WSS
self.broker_client.publish(self.request.input)
self.response.payload.id = wss.id
self.response.payload.name = input.name
class Edit(AdminService):
""" Updates a WS-S definition.
"""
class SimpleIO(AdminSIO):
request_elem = 'zato_security_wss_edit_request'
response_elem = 'zato_security_wss_edit_response'
input_required = (
'id', 'cluster_id', 'name', 'is_active', 'username',
'password_type', Boolean('reject_empty_nonce_creat'), Boolean('reject_stale_tokens'),
Integer('reject_expiry_limit'), Integer('nonce_freshness_time'))
output_required = ('id', 'name')
def handle(self):
input = self.request.input
with closing(self.odb.session()) as session:
existing_one = session.query(WSSDefinition).\
filter(Cluster.id==input.cluster_id).\
filter(WSSDefinition.name==input.name).\
filter(WSSDefinition.id!=input.id).\
first()
if existing_one:
raise Exception('WS-Security definition [{0}] already exists on this cluster'.format(input.name))
try:
wss = session.query(WSSDefinition).filter_by(id=input.id).one()
old_name = wss.name
wss.name = input.name
wss.is_active = input.is_active
wss.username = input.username
wss.password_type = input.password_type
wss.reject_empty_nonce_creat = input.reject_empty_nonce_creat
wss.reject_stale_tokens = input.reject_stale_tokens
wss.reject_expiry_limit = input.reject_expiry_limit
wss.nonce_freshness_time = input.nonce_freshness_time
session.add(wss)
session.commit()
            except Exception:
                msg = "Could not update the WS-Security definition, e:[{e}]".format(e=format_exc())
self.logger.error(msg)
session.rollback()
raise
else:
input.action = SECURITY.WSS_EDIT.value
input.old_name = old_name
input.sec_type = SEC_DEF_TYPE.WSS
self.broker_client.publish(self.request.input)
self.response.payload.id = input.id
self.response.payload.name = input.name
class ChangePassword(ChangePasswordBase):
""" Changes the password of a WS-Security definition.
"""
class SimpleIO(ChangePasswordBase.SimpleIO):
request_elem = 'zato_security_wss_change_password_request'
response_elem = 'zato_security_wss_change_password_response'
def handle(self):
def _auth(instance, password):
instance.password = password
return self._handle(WSSDefinition, _auth, SECURITY.WSS_CHANGE_PASSWORD.value)
class Delete(AdminService):
""" Deletes a WS-Security definition.
"""
class SimpleIO(AdminSIO):
request_elem = 'zato_security_wss_delete_request'
response_elem = 'zato_security_wss_delete_response'
input_required = ('id',)
def handle(self):
with closing(self.odb.session()) as session:
try:
wss = session.query(WSSDefinition).\
filter(WSSDefinition.id==self.request.input.id).\
one()
session.delete(wss)
session.commit()
            except Exception:
                msg = "Could not delete the WS-Security definition, e:[{e}]".format(e=format_exc())
self.logger.error(msg)
session.rollback()
raise
else:
self.request.input.action = SECURITY.WSS_DELETE.value
self.request.input.name = wss.name
self.broker_client.publish(self.request.input)
|
WiproOpenSourcePractice/bdreappstore
|
refs/heads/develop
|
enu/real_time_event_detection/hadoopstream/reducer_test.py
|
1
|
#!/usr/bin/env python
import sys
import os
os.environ['MPLCONFIGDIR'] = "/tmp/"
import pandas as pd
import numpy as np
import commands
import pickle as p
from sklearn import svm
from sklearn.cross_validation import StratifiedKFold
from sklearn import preprocessing
from sklearn.externals import joblib
current_key = None
key = None
vecList = []
nameList = []
#classList = []
def qt_rmvd( string ):
string = string.strip()
if string.startswith("'") and string.endswith("'"):
string = string[1:-1]
return string
#commands.getoutput("rm Schlumberger-SVM*")
commands.getoutput("hadoop fs -get /user/dropuser/schlumberger-model/* ./")
clf = joblib.load("Schlumberger-SVM.pkl")
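# Hadoop streaming delivers mapper output sorted by key, so all rows for one
# key arrive contiguously; vectors are buffered until the key changes, then
# the whole group is scaled and predicted in one batch.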
for line in sys.stdin:
# remove leading and trailing whitespace
line = line.strip()
# parse the input we got from mapper.py
key, values = line.split('\t', 1)
values = values[1:-1]
values = values.split(",")
day = qt_rmvd(values[0])
values = values[1:]
if current_key is None:
current_key = key
if current_key == key:
vec = [float(qt_rmvd(x)) for x in values[:-1]]
vecList.append(vec)
nameList.append([key,day])
else:
vecList = np.asarray(vecList)
min_max_scaler = preprocessing.MinMaxScaler()
vecList = min_max_scaler.fit_transform(vecList)
pred = clf.predict(vecList)
for i in range(len(pred)):
print str(nameList[i][0]).strip()+","+str(nameList[i][1])+","+str(pred[i])
vecList = []
nameList = []
current_key = key
vec = [float(qt_rmvd(x)) for x in values[:-1]]
vecList.append(vec)
nameList.append([key,day])
if len(vecList) > 0:
vecList = np.asarray(vecList)
min_max_scaler = preprocessing.MinMaxScaler()
vecList = min_max_scaler.fit_transform(vecList)
pred = clf.predict(vecList)
for i in range(len(pred)):
print str(nameList[i][0]).strip()+","+str(nameList[i][1])+","+str(pred[i])
op = commands.getoutput("hadoop fs -rm /user/dropuser/schlumberger-result/*")
|
argriffing/scipy
|
refs/heads/master
|
scipy/special/tests/test_gammainc.py
|
48
|
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.special import gammainc
from scipy.special._testutils import FuncData
def test_line():
# Test on the line a = x where a simpler asymptotic expansion
# (analog of DLMF 8.12.15) is available.
def gammainc_line(x):
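        # Evaluates the truncated series
        #     P(x, x) ~ 1/2 - (sum_k c_k * x**-k) / sqrt(2*pi*x)
        # using the coefficients c_k listed below.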
c = np.array([-1/3, -1/540, 25/6048, 101/155520,
-3184811/3695155200, -2745493/8151736420])
res = 0
xfac = 1
for ck in c:
res -= ck*xfac
xfac /= x
res /= np.sqrt(2*np.pi*x)
res += 0.5
return res
x = np.logspace(np.log10(25), 300, 500)
a = x.copy()
dataset = np.vstack((a, x, gammainc_line(x))).T
FuncData(gammainc, dataset, (0, 1), 2, rtol=1e-11).check()
|
udrg/crazyflie-clients-python
|
refs/heads/develop
|
lib/cfclient/utils/__init__.py
|
32
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Various utilities used by the user interface.
"""
|
commonwealth-of-puerto-rico/libre
|
refs/heads/master
|
libre/apps/lock_manager/literals.py
|
2
|
DEFAULT_LOCK_TIMEOUT_VALUE = 30
|
MrHyde03/android_kernel_samsung_espressovzw-jb
|
refs/heads/jb
|
scripts/gcc-wrapper.py
|
36
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Code Aurora nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"alignment.c:720",
"async.c:122",
"async.c:270",
"dir.c:43",
"dm.c:1053",
"dm.c:1080",
"dm-table.c:1120",
"dm-table.c:1126",
"drm_edid.c:1303",
"eventpoll.c:1143",
"f_mass_storage.c:3368",
"inode.c:72",
"inode.c:73",
"inode.c:74",
"msm_sdcc.c:126",
"msm_sdcc.c:128",
"nf_conntrack_netlink.c:790",
"nf_nat_standalone.c:118",
"return_address.c:62",
"soc-core.c:1719",
"xt_log.h:50",
"vx6953.c:3124",
"dma-mapping.c:238",
"dma-mapping.c:284",
"xt_log.h:50",
])
# Capture the name of the object file so we can find (and remove) it if a
# forbidden warning is seen.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
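# Group 2 of the regex is the "file.ext:line" token that the (currently
# disabled) check in interpret_warning compares against allowed_warnings.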
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
# if m and m.group(2) not in allowed_warnings:
# print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
# if ofile:
# try:
# os.remove(ofile)
# except OSError:
# pass
# sys.exit(1)
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
interpret_warning(line)
result = proc.wait()
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
|
cl4rke/scikit-learn
|
refs/heads/master
|
doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py
|
254
|
"""Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
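    # min_df=3 drops tokens seen in fewer than 3 documents; max_df=0.95 drops
    # tokens that appear in more than 95% of documents (likely stop words).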
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
    # TASK: print the cross-validated scores for each parameter set
    # explored by the grid search
print(grid_search.grid_scores_)
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
|
labsanmartin/Bika-LIMS
|
refs/heads/master
|
bika/lims/upgrade/to3038.py
|
3
|
from Acquisition import aq_inner
from Acquisition import aq_parent
from Products.CMFCore.utils import getToolByName
def upgrade(tool):
    # Hack to prevent out-of-date upgrading
# Related: PR #1484
# https://github.com/bikalabs/Bika-LIMS/pull/1484
from bika.lims.upgrade import skip_pre315
if skip_pre315(aq_parent(aq_inner(tool))):
return True
portal = aq_parent(aq_inner(tool))
setup = portal.portal_setup
# update affected tools
setup.runImportStepFromProfile('profile-bika.lims:default', 'workflow-csv')
wf = getToolByName(portal, 'portal_workflow')
wf.updateRoleMappings()
return True
|
Softmotions/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/user_api/migrations/0001_initial.py
|
114
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserPreference'
db.create_table('user_api_userpreference', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['auth.User'])),
('key', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('value', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('user_api', ['UserPreference'])
# Adding unique constraint on 'UserPreference', fields ['user', 'key']
db.create_unique('user_api_userpreference', ['user_id', 'key'])
def backwards(self, orm):
# Removing unique constraint on 'UserPreference', fields ['user', 'key']
db.delete_unique('user_api_userpreference', ['user_id', 'key'])
# Deleting model 'UserPreference'
db.delete_table('user_api_userpreference')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'user_api.userpreference': {
'Meta': {'unique_together': "(('user', 'key'),)", 'object_name': 'UserPreference'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['auth.User']"}),
'value': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['user_api']
|
Facetracker-project/facetracker-core
|
refs/heads/master
|
lib/youtube-dl/devscripts/gh-pages/update-sites.py
|
96
|
#!/usr/bin/env python3
from __future__ import unicode_literals
import sys
import os
import textwrap
# We must be able to import youtube_dl
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import youtube_dl
def main():
with open('supportedsites.html.in', 'r', encoding='utf-8') as tmplf:
template = tmplf.read()
ie_htmls = []
for ie in youtube_dl.list_extractors(age_limit=None):
ie_html = '<b>{}</b>'.format(ie.IE_NAME)
ie_desc = getattr(ie, 'IE_DESC', None)
if ie_desc is False:
continue
elif ie_desc is not None:
ie_html += ': {}'.format(ie.IE_DESC)
if not ie.working():
ie_html += ' (Currently broken)'
ie_htmls.append('<li>{}</li>'.format(ie_html))
template = template.replace('@SITES@', textwrap.indent('\n'.join(ie_htmls), '\t'))
with open('supportedsites.html', 'w', encoding='utf-8') as sitesf:
sitesf.write(template)
if __name__ == '__main__':
main()
|
Aristocles/CouchPotatoServer
|
refs/heads/master
|
libs/tornado/platform/asyncio.py
|
14
|
"""Bridges between the `asyncio` module and Tornado IOLoop.
This is a work in progress and interfaces are subject to change.
To test:
python3.4 -m tornado.test.runtests --ioloop=tornado.platform.asyncio.AsyncIOLoop
python3.4 -m tornado.test.runtests --ioloop=tornado.platform.asyncio.AsyncIOMainLoop
(the tests log a few warnings with AsyncIOMainLoop because they leave some
unfinished callbacks on the event loop that fail when it resumes)
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import functools
# _Timeout is used for its timedelta_to_seconds method for py26 compatibility.
from tornado.ioloop import IOLoop, _Timeout
from tornado import stack_context
try:
# Import the real asyncio module for py33+ first. Older versions of the
# trollius backport also use this name.
import asyncio
except ImportError as e:
# Asyncio itself isn't available; see if trollius is (backport to py26+).
try:
import trollius as asyncio
except ImportError:
# Re-raise the original asyncio error, not the trollius one.
raise e
class BaseAsyncIOLoop(IOLoop):
def initialize(self, asyncio_loop, close_loop=False):
self.asyncio_loop = asyncio_loop
self.close_loop = close_loop
self.asyncio_loop.call_soon(self.make_current)
# Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler)
self.handlers = {}
# Set of fds listening for reads/writes
self.readers = set()
self.writers = set()
self.closing = False
def close(self, all_fds=False):
self.closing = True
for fd in list(self.handlers):
fileobj, handler_func = self.handlers[fd]
self.remove_handler(fd)
if all_fds:
self.close_fd(fileobj)
if self.close_loop:
self.asyncio_loop.close()
def add_handler(self, fd, handler, events):
fd, fileobj = self.split_fd(fd)
if fd in self.handlers:
raise ValueError("fd %s added twice" % fd)
self.handlers[fd] = (fileobj, stack_context.wrap(handler))
if events & IOLoop.READ:
self.asyncio_loop.add_reader(
fd, self._handle_events, fd, IOLoop.READ)
self.readers.add(fd)
if events & IOLoop.WRITE:
self.asyncio_loop.add_writer(
fd, self._handle_events, fd, IOLoop.WRITE)
self.writers.add(fd)
def update_handler(self, fd, events):
fd, fileobj = self.split_fd(fd)
if events & IOLoop.READ:
if fd not in self.readers:
self.asyncio_loop.add_reader(
fd, self._handle_events, fd, IOLoop.READ)
self.readers.add(fd)
else:
if fd in self.readers:
self.asyncio_loop.remove_reader(fd)
self.readers.remove(fd)
if events & IOLoop.WRITE:
if fd not in self.writers:
self.asyncio_loop.add_writer(
fd, self._handle_events, fd, IOLoop.WRITE)
self.writers.add(fd)
else:
if fd in self.writers:
self.asyncio_loop.remove_writer(fd)
self.writers.remove(fd)
def remove_handler(self, fd):
fd, fileobj = self.split_fd(fd)
if fd not in self.handlers:
return
if fd in self.readers:
self.asyncio_loop.remove_reader(fd)
self.readers.remove(fd)
if fd in self.writers:
self.asyncio_loop.remove_writer(fd)
self.writers.remove(fd)
del self.handlers[fd]
def _handle_events(self, fd, events):
fileobj, handler_func = self.handlers[fd]
handler_func(fileobj, events)
def start(self):
self._setup_logging()
self.asyncio_loop.run_forever()
def stop(self):
self.asyncio_loop.stop()
def _run_callback(self, callback, *args, **kwargs):
try:
callback(*args, **kwargs)
except Exception:
self.handle_callback_exception(callback)
def add_timeout(self, deadline, callback):
if isinstance(deadline, (int, float)):
delay = max(deadline - self.time(), 0)
elif isinstance(deadline, datetime.timedelta):
delay = _Timeout.timedelta_to_seconds(deadline)
else:
raise TypeError("Unsupported deadline %r", deadline)
return self.asyncio_loop.call_later(delay, self._run_callback,
stack_context.wrap(callback))
def remove_timeout(self, timeout):
timeout.cancel()
def add_callback(self, callback, *args, **kwargs):
if self.closing:
raise RuntimeError("IOLoop is closing")
if kwargs:
self.asyncio_loop.call_soon_threadsafe(functools.partial(
self._run_callback, stack_context.wrap(callback),
*args, **kwargs))
else:
self.asyncio_loop.call_soon_threadsafe(
self._run_callback, stack_context.wrap(callback), *args)
add_callback_from_signal = add_callback
class AsyncIOMainLoop(BaseAsyncIOLoop):
def initialize(self):
super(AsyncIOMainLoop, self).initialize(asyncio.get_event_loop(),
close_loop=False)
class AsyncIOLoop(BaseAsyncIOLoop):
def initialize(self):
super(AsyncIOLoop, self).initialize(asyncio.new_event_loop(),
close_loop=True)
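# A minimal usage sketch (assuming a Python version where asyncio or trollius
# imported successfully above):
#
#   from tornado.platform.asyncio import AsyncIOMainLoop
#   AsyncIOMainLoop().install()  # route Tornado's singleton IOLoop via asyncio
#   asyncio.get_event_loop().run_forever()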
|
DataDog/kafka-python
|
refs/heads/master
|
kafka/metrics/metrics_reporter.py
|
10
|
from __future__ import absolute_import
import abc
class AbstractMetricsReporter(object):
"""
An abstract class to allow things to listen as new metrics
are created so they can be reported.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def init(self, metrics):
"""
This is called when the reporter is first registered
to initially register all existing metrics
Arguments:
metrics (list of KafkaMetric): All currently existing metrics
"""
raise NotImplementedError
@abc.abstractmethod
def metric_change(self, metric):
"""
This is called whenever a metric is updated or added
Arguments:
metric (KafkaMetric)
"""
raise NotImplementedError
@abc.abstractmethod
def metric_removal(self, metric):
"""
This is called whenever a metric is removed
Arguments:
metric (KafkaMetric)
"""
raise NotImplementedError
@abc.abstractmethod
def configure(self, configs):
"""
Configure this class with the given key-value pairs
Arguments:
configs (dict of {str, ?})
"""
raise NotImplementedError
@abc.abstractmethod
def close(self):
"""Called when the metrics repository is closed."""
raise NotImplementedError
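# A minimal subclass sketch (hypothetical; a real reporter would forward
# metrics to an external monitoring system):
#
#   class PrintingReporter(AbstractMetricsReporter):
#       def init(self, metrics): pass
#       def metric_change(self, metric): print(metric.metric_name)
#       def metric_removal(self, metric): pass
#       def configure(self, configs): pass
#       def close(self): pass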
|
viger/docker
|
refs/heads/master
|
proxy/proxy/code/default/gae_proxy/local/direct_handler.py
|
3
|
#!/usr/bin/env python
# coding:utf-8
import errno
import time
import re
import socket
import ssl
import httplib
import OpenSSL
NetWorkIOError = (socket.error, ssl.SSLError, OpenSSL.SSL.Error, OSError)
from connect_manager import https_manager
from gae_handler import return_fail_message
from google_ip import google_ip
from config import config
from xlog import getLogger
xlog = getLogger("gae_proxy")
google_server_types = ["ClientMapServer"]
def send_header(wfile, keyword, value):
keyword = keyword.title()
if keyword == 'Set-Cookie':
for cookie in re.split(r', (?=[^ =]+(?:=|$))', value):
wfile.write("%s: %s\r\n" % (keyword, cookie))
#xlog.debug("Head1 %s: %s", keyword, cookie)
elif keyword == 'Content-Disposition' and '"' not in value:
value = re.sub(r'filename=([^"\']+)', 'filename="\\1"', value)
wfile.write("%s: %s\r\n" % (keyword, value))
#xlog.debug("Head1 %s: %s", keyword, value)
elif keyword == "Alternate-Protocol":
return
else:
#xlog.debug("Head1 %s: %s", keyword, value)
wfile.write("%s: %s\r\n" % (keyword, value))
def fetch(method, host, path, headers, payload):
request_data = '%s %s HTTP/1.1\r\n' % (method, path)
request_data += ''.join('%s: %s\r\n' % (k, v) for k, v in headers.items())
request_data += '\r\n'
ssl_sock = https_manager.get_ssl_connection(host)
if not ssl_sock:
return
try:
ssl_sock.send(request_data.encode())
payload_len = len(payload)
start = 0
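        # Send the payload in slices of at most 64KB: a single SSL write may
        # not accept an arbitrarily large buffer, and send() returns how many
        # bytes were actually written.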
while start < payload_len:
send_size = min(payload_len - start, 65535)
sended = ssl_sock.send(payload[start:start+send_size])
start += sended
response = httplib.HTTPResponse(ssl_sock, buffering=True)
response.ssl_sock = ssl_sock
orig_timeout = ssl_sock.gettimeout()
ssl_sock.settimeout(90)
response.begin()
ssl_sock.settimeout(orig_timeout)
except httplib.BadStatusLine as e:
xlog.warn("direct_handler.fetch bad status line:%r", e)
google_ip.report_connect_closed(ssl_sock.ip, "request_fail")
response = None
except Exception as e:
xlog.warn("direct_handler.fetch:%r", e)
google_ip.report_connect_closed(ssl_sock.ip, "request_fail")
response = None
return response
def handler(method, host, url, headers, body, wfile):
time_request = time.time()
if "Connection" in headers and headers["Connection"] == "close":
del headers["Connection"]
errors = []
response = None
while True:
if time.time() - time_request > 30:
return return_fail_message(wfile)
try:
response = fetch(method, host, url, headers, body)
if response:
if response.status > 400:
server_type = response.getheader('Server', "")
if "G" not in server_type and "g" not in server_type and server_type not in google_server_types:
xlog.warn("IP:%s host:%s not support GAE, server type:%s status:%d", response.ssl_sock.ip, host, server_type, response.status)
google_ip.report_connect_fail(response.ssl_sock.ip)
response.close()
continue
break
except OpenSSL.SSL.SysCallError as e:
errors.append(e)
xlog.warn("direct_handler.handler err:%r %s/%s", e, host, url)
except Exception as e:
errors.append(e)
xlog.exception('direct_handler.handler %r %s %s , retry...', e, host, url)
try:
send_to_browser = True
try:
response_headers = dict((k.title(), v) for k, v in response.getheaders())
wfile.write("HTTP/1.1 %d %s\r\n" % (response.status, response.reason))
for key, value in response.getheaders():
send_header(wfile, key, value)
wfile.write("\r\n")
except Exception as e:
send_to_browser = False
wait_time = time.time()-time_request
xlog.info("direct_handler.handler send response fail. t:%d e:%r %s%s", wait_time, e, host, url)
if method == 'HEAD' or response.status in (204, 304):
xlog.info("DIRECT t:%d %d %s %s", (time.time()-time_request)*1000, response.status, host, url)
https_manager.save_ssl_connection_for_reuse(response.ssl_sock, host)
response.close()
return
if 'Transfer-Encoding' in response_headers:
length = 0
while True:
try:
data = response.read(8192)
                except httplib.IncompleteRead as e:
data = e.partial
except Exception as e:
google_ip.report_connect_closed(response.ssl_sock.ip, "receive fail")
xlog.warn("direct_handler.handler send Transfer-Encoding t:%d e:%r %s/%s", time.time()-time_request, e, host, url)
response.close()
return
if send_to_browser:
try:
if not data:
wfile.write('0\r\n\r\n')
break
length += len(data)
wfile.write('%x\r\n' % len(data))
wfile.write(data)
wfile.write('\r\n')
except Exception as e:
send_to_browser = False
xlog.info("direct_handler.handler send Transfer-Encoding t:%d e:%r %s/%s", time.time()-time_request, e, host, url)
else:
if not data:
break
https_manager.save_ssl_connection_for_reuse(response.ssl_sock, host)
response.close()
xlog.info("DIRECT chucked t:%d s:%d %d %s %s", (time.time()-time_request)*1000, length, response.status, host, url)
return
content_length = int(response.getheader('Content-Length', 0))
content_range = response.getheader('Content-Range', '')
if content_range:
start, end, length = tuple(int(x) for x in re.search(r'bytes (\d+)-(\d+)/(\d+)', content_range).group(1, 2, 3))
else:
start, end, length = 0, content_length-1, content_length
time_last_read = time.time()
while True:
if start > end:
https_manager.save_ssl_connection_for_reuse(response.ssl_sock, host, call_time=time_request)
xlog.info("DIRECT t:%d s:%d %d %s %s", (time.time()-time_request)*1000, length, response.status, host, url)
return
to_read = end - start + 1
data = response.read(to_read)
if not data:
if time.time() - time_last_read > 20:
google_ip.report_connect_closed(response.ssl_sock.ip, "receive fail")
response.close()
xlog.warn("read timeout t:%d len:%d left:%d %s %s", (time.time()-time_request)*1000, length, (end-start), host, url)
return
else:
time.sleep(0.1)
continue
time_last_read = time.time()
data_len = len(data)
start += data_len
if send_to_browser:
try:
ret = wfile.write(data)
if ret == ssl.SSL_ERROR_WANT_WRITE or ret == ssl.SSL_ERROR_WANT_READ:
xlog.debug("send to browser wfile.write ret:%d", ret)
ret = wfile.write(data)
except Exception as e_b:
                    # Log the failed write and stop sending to the browser;
                    # we keep reading upstream so the connection can be reused.
                    xlog.info('direct_handler send to browser return %r %s %r', e_b, host, url)
                    send_to_browser = False
except NetWorkIOError as e:
google_ip.report_connect_closed(response.ssl_sock.ip, "receive fail")
time_except = time.time()
time_cost = time_except - time_request
if e[0] in (errno.ECONNABORTED, errno.EPIPE) or 'bad write retry' in repr(e):
xlog.exception("direct_handler err:%r %s %s time:%d", e, host, url, time_cost)
else:
xlog.exception("direct_handler except:%r %s %s", e, host, url)
except Exception as e:
google_ip.report_connect_closed(response.ssl_sock.ip, "receive fail")
xlog.exception("direct_handler except:%r %s %s", e, host, url)
|
mitsuhiko/django
|
refs/heads/master
|
tests/regressiontests/model_fields/imagefield.py
|
106
|
import os
import shutil
from django.core.files import File
from django.core.files.base import ContentFile
from django.core.files.images import ImageFile
from django.test import TestCase
from models import Image, Person, PersonWithHeight, PersonWithHeightAndWidth, \
PersonDimensionsFirst, PersonTwoImages, TestImageFieldFile
# If PIL available, do these tests.
if Image:
from models import temp_storage_dir
class ImageFieldTestMixin(object):
"""
Mixin class to provide common functionality to ImageField test classes.
"""
# Person model to use for tests.
PersonModel = PersonWithHeightAndWidth
# File class to use for file instances.
File = ImageFile
def setUp(self):
"""
Creates a pristine temp directory (or deletes and recreates if it
already exists) that the model uses as its storage directory.
Sets up two ImageFile instances for use in tests.
"""
if os.path.exists(temp_storage_dir):
shutil.rmtree(temp_storage_dir)
os.mkdir(temp_storage_dir)
file_path1 = os.path.join(os.path.dirname(__file__), "4x8.png")
self.file1 = self.File(open(file_path1, 'rb'))
file_path2 = os.path.join(os.path.dirname(__file__), "8x4.png")
self.file2 = self.File(open(file_path2, 'rb'))
def tearDown(self):
"""
Removes temp directory and all its contents.
"""
shutil.rmtree(temp_storage_dir)
def check_dimensions(self, instance, width, height,
field_name='mugshot'):
"""
Asserts that the given width and height values match both the
field's height and width attributes and the height and width fields
(if defined) the image field is caching to.
Note, this method will check for dimension fields named by adding
"_width" or "_height" to the name of the ImageField. So, the
models used in these tests must have their fields named
accordingly.
By default, we check the field named "mugshot", but this can be
specified by passing the field_name parameter.
"""
field = getattr(instance, field_name)
# Check height/width attributes of field.
if width is None and height is None:
self.assertRaises(ValueError, getattr, field, 'width')
self.assertRaises(ValueError, getattr, field, 'height')
else:
self.assertEqual(field.width, width)
self.assertEqual(field.height, height)
# Check height/width fields of model, if defined.
width_field_name = field_name + '_width'
if hasattr(instance, width_field_name):
self.assertEqual(getattr(instance, width_field_name), width)
height_field_name = field_name + '_height'
if hasattr(instance, height_field_name):
self.assertEqual(getattr(instance, height_field_name), height)
class ImageFieldTests(ImageFieldTestMixin, TestCase):
"""
Tests for ImageField that don't need to be run with each of the
different test model classes.
"""
def test_equal_notequal_hash(self):
"""
Bug #9786: Ensure '==' and '!=' work correctly.
Bug #9508: make sure hash() works as expected (equal items must
hash to the same value).
"""
# Create two Persons with different mugshots.
p1 = self.PersonModel(name="Joe")
p1.mugshot.save("mug", self.file1)
p2 = self.PersonModel(name="Bob")
p2.mugshot.save("mug", self.file2)
self.assertEqual(p1.mugshot == p2.mugshot, False)
self.assertEqual(p1.mugshot != p2.mugshot, True)
# Test again with an instance fetched from the db.
p1_db = self.PersonModel.objects.get(name="Joe")
self.assertEqual(p1_db.mugshot == p2.mugshot, False)
self.assertEqual(p1_db.mugshot != p2.mugshot, True)
# Instance from db should match the local instance.
self.assertEqual(p1_db.mugshot == p1.mugshot, True)
self.assertEqual(hash(p1_db.mugshot), hash(p1.mugshot))
self.assertEqual(p1_db.mugshot != p1.mugshot, False)
def test_instantiate_missing(self):
"""
        If the underlying file is unavailable, still instantiate the
        object without error.
"""
p = self.PersonModel(name="Joan")
p.mugshot.save("shot", self.file1)
p = self.PersonModel.objects.get(name="Joan")
path = p.mugshot.path
shutil.move(path, path + '.moved')
p2 = self.PersonModel.objects.get(name="Joan")
def test_delete_when_missing(self):
"""
Bug #8175: correctly delete an object where the file no longer
exists on the file system.
"""
p = self.PersonModel(name="Fred")
p.mugshot.save("shot", self.file1)
os.remove(p.mugshot.path)
p.delete()
def test_size_method(self):
"""
Bug #8534: FileField.size should not leave the file open.
"""
p = self.PersonModel(name="Joan")
p.mugshot.save("shot", self.file1)
# Get a "clean" model instance
p = self.PersonModel.objects.get(name="Joan")
# It won't have an opened file.
self.assertEqual(p.mugshot.closed, True)
# After asking for the size, the file should still be closed.
_ = p.mugshot.size
self.assertEqual(p.mugshot.closed, True)
def test_pickle(self):
"""
Tests that ImageField can be pickled, unpickled, and that the
image of the unpickled version is the same as the original.
"""
import pickle
p = Person(name="Joe")
p.mugshot.save("mug", self.file1)
dump = pickle.dumps(p)
p2 = Person(name="Bob")
p2.mugshot = self.file1
loaded_p = pickle.loads(dump)
self.assertEqual(p.mugshot, loaded_p.mugshot)
class ImageFieldTwoDimensionsTests(ImageFieldTestMixin, TestCase):
"""
Tests behavior of an ImageField and its dimensions fields.
"""
def test_constructor(self):
"""
Tests assigning an image field through the model's constructor.
"""
p = self.PersonModel(name='Joe', mugshot=self.file1)
self.check_dimensions(p, 4, 8)
p.save()
self.check_dimensions(p, 4, 8)
def test_image_after_constructor(self):
"""
Tests behavior when image is not passed in constructor.
"""
p = self.PersonModel(name='Joe')
# TestImageField value will default to being an instance of its
# attr_class, a TestImageFieldFile, with name == None, which will
# cause it to evaluate as False.
self.assertEqual(isinstance(p.mugshot, TestImageFieldFile), True)
self.assertEqual(bool(p.mugshot), False)
# Test setting a fresh created model instance.
p = self.PersonModel(name='Joe')
p.mugshot = self.file1
self.check_dimensions(p, 4, 8)
def test_create(self):
"""
Tests assigning an image in Manager.create().
"""
p = self.PersonModel.objects.create(name='Joe', mugshot=self.file1)
self.check_dimensions(p, 4, 8)
def test_default_value(self):
"""
Tests that the default value for an ImageField is an instance of
the field's attr_class (TestImageFieldFile in this case) with no
name (name set to None).
"""
p = self.PersonModel()
self.assertEqual(isinstance(p.mugshot, TestImageFieldFile), True)
self.assertEqual(bool(p.mugshot), False)
def test_assignment_to_None(self):
"""
Tests that assigning ImageField to None clears dimensions.
"""
p = self.PersonModel(name='Joe', mugshot=self.file1)
self.check_dimensions(p, 4, 8)
# If image assigned to None, dimension fields should be cleared.
p.mugshot = None
self.check_dimensions(p, None, None)
p.mugshot = self.file2
self.check_dimensions(p, 8, 4)
def test_field_save_and_delete_methods(self):
"""
Tests assignment using the field's save method and deletion using
the field's delete method.
"""
p = self.PersonModel(name='Joe')
p.mugshot.save("mug", self.file1)
self.check_dimensions(p, 4, 8)
# A new file should update dimensions.
p.mugshot.save("mug", self.file2)
self.check_dimensions(p, 8, 4)
# Field and dimensions should be cleared after a delete.
p.mugshot.delete(save=False)
self.assertEqual(p.mugshot, None)
self.check_dimensions(p, None, None)
def test_dimensions(self):
"""
Checks that dimensions are updated correctly in various situations.
"""
p = self.PersonModel(name='Joe')
# Dimensions should get set if file is saved.
p.mugshot.save("mug", self.file1)
self.check_dimensions(p, 4, 8)
# Test dimensions after fetching from database.
p = self.PersonModel.objects.get(name='Joe')
# Bug 11084: Dimensions should not get recalculated if file is
# coming from the database. We test this by checking if the file
# was opened.
self.assertEqual(p.mugshot.was_opened, False)
self.check_dimensions(p, 4, 8)
# After checking dimensions on the image field, the file will have
# opened.
self.assertEqual(p.mugshot.was_opened, True)
# Dimensions should now be cached, and if we reset was_opened and
# check dimensions again, the file should not have opened.
p.mugshot.was_opened = False
self.check_dimensions(p, 4, 8)
self.assertEqual(p.mugshot.was_opened, False)
# If we assign a new image to the instance, the dimensions should
# update.
p.mugshot = self.file2
self.check_dimensions(p, 8, 4)
# Dimensions were recalculated, and hence file should have opened.
self.assertEqual(p.mugshot.was_opened, True)
class ImageFieldNoDimensionsTests(ImageFieldTwoDimensionsTests):
"""
Tests behavior of an ImageField with no dimension fields.
"""
PersonModel = Person
class ImageFieldOneDimensionTests(ImageFieldTwoDimensionsTests):
"""
Tests behavior of an ImageField with one dimensions field.
"""
PersonModel = PersonWithHeight
class ImageFieldDimensionsFirstTests(ImageFieldTwoDimensionsTests):
"""
Tests behavior of an ImageField where the dimensions fields are
defined before the ImageField.
"""
PersonModel = PersonDimensionsFirst
class ImageFieldUsingFileTests(ImageFieldTwoDimensionsTests):
"""
Tests behavior of an ImageField when assigning it a File instance
rather than an ImageFile instance.
"""
PersonModel = PersonDimensionsFirst
File = File
class TwoImageFieldTests(ImageFieldTestMixin, TestCase):
"""
Tests a model with two ImageFields.
"""
PersonModel = PersonTwoImages
def test_constructor(self):
p = self.PersonModel(mugshot=self.file1, headshot=self.file2)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
p.save()
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
def test_create(self):
p = self.PersonModel.objects.create(mugshot=self.file1,
headshot=self.file2)
self.check_dimensions(p, 4, 8)
self.check_dimensions(p, 8, 4, 'headshot')
def test_assignment(self):
p = self.PersonModel()
self.check_dimensions(p, None, None, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
p.mugshot = self.file1
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
p.headshot = self.file2
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
# Clear the ImageFields one at a time.
p.mugshot = None
self.check_dimensions(p, None, None, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
p.headshot = None
self.check_dimensions(p, None, None, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
def test_field_save_and_delete_methods(self):
p = self.PersonModel(name='Joe')
p.mugshot.save("mug", self.file1)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
p.headshot.save("head", self.file2)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
# We can use save=True when deleting the image field with null=True
# dimension fields and the other field has an image.
p.headshot.delete(save=True)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
p.mugshot.delete(save=False)
self.check_dimensions(p, None, None, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
def test_dimensions(self):
"""
Checks that dimensions are updated correctly in various situations.
"""
p = self.PersonModel(name='Joe')
# Dimensions should get set for the saved file.
p.mugshot.save("mug", self.file1)
p.headshot.save("head", self.file2)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
# Test dimensions after fetching from database.
p = self.PersonModel.objects.get(name='Joe')
# Bug 11084: Dimensions should not get recalculated if file is
# coming from the database. We test this by checking if the file
# was opened.
self.assertEqual(p.mugshot.was_opened, False)
self.assertEqual(p.headshot.was_opened, False)
        self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
# After checking dimensions on the image fields, the files will
# have been opened.
self.assertEqual(p.mugshot.was_opened, True)
self.assertEqual(p.headshot.was_opened, True)
# Dimensions should now be cached, and if we reset was_opened and
# check dimensions again, the file should not have opened.
p.mugshot.was_opened = False
p.headshot.was_opened = False
        self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
self.assertEqual(p.mugshot.was_opened, False)
self.assertEqual(p.headshot.was_opened, False)
# If we assign a new image to the instance, the dimensions should
# update.
p.mugshot = self.file2
p.headshot = self.file1
self.check_dimensions(p, 8, 4, 'mugshot')
self.check_dimensions(p, 4, 8, 'headshot')
# Dimensions were recalculated, and hence file should have opened.
self.assertEqual(p.mugshot.was_opened, True)
self.assertEqual(p.headshot.was_opened, True)
|
runt18/nupic
|
refs/heads/master
|
tests/swarming/nupic/swarming/experiments/delta/permutations.py
|
38
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by ExpGenerator to generate the actual
permutations.py file by replacing $XXXXXXXX tokens with desired values.
This permutations.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
import os
from nupic.swarming.permutationhelpers import *
# The name of the field being predicted. Any allowed permutation MUST contain
# the prediction field.
# (generated from PREDICTION_FIELD)
predictedField = 'value'
permutations = {
'modelParams': {
'sensorParams': {
'encoders': {
'value': PermuteEncoder(fieldName='value',
encoderClass='ScalarSpaceEncoder',
space=PermuteChoices(['delta', 'absolute']),
clipInput=True,
w=21,
n=PermuteInt(28, 521)),
'_classifierInput': dict(fieldname='value',
type='ScalarSpaceEncoder',
classifierOnly=True,
space=PermuteChoices(['delta', 'absolute']),
clipInput=True,
w=21,
n=PermuteInt(28, 521)),
},
},
'tpParams': {
'minThreshold': PermuteInt(9, 12),
'activationThreshold': PermuteInt(12, 16),
'pamLength': PermuteInt(1, 5),
},
'clParams': {
'alpha': PermuteFloat(0.000100, 0.100000),
},
}
}
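# Each Permute* object above declares one dimension of the swarm's search
# space (a sketch of how permutationhelpers reads, not authoritative):
# PermuteInt(28, 521) samples integers in [28, 521], PermuteFloat(0.0001, 0.1)
# samples reals in that range, and PermuteChoices(['delta', 'absolute'])
# picks one of the listed values.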
# Fields selected for final hypersearch report;
# NOTE: These values are used as regular expressions by RunPermutations.py's
# report generator
# (fieldname values generated from PERM_PREDICTED_FIELD_NAME)
report = [
'.*value.*',
]
# Permutation optimization setting: either minimize or maximize metric
# used by RunPermutations.
# NOTE: The value is used as a regular expressions by RunPermutations.py's
# report generator
# (generated from minimize = "multiStepBestPredictions:multiStep:errorMetric='aae':steps=1:window=1000:field=value")
minimize = "multiStepBestPredictions:multiStep:errorMetric='aae':steps=1:window=10:field=value"
minParticlesPerSwarm = None
def permutationFilter(perm):
""" This function can be used to selectively filter out specific permutation
combinations. It is called by RunPermutations for every possible permutation
  of the variables in the permutations dict. It should return True for a valid
combination of permutation values and False for an invalid one.
Parameters:
---------------------------------------------------------
perm: dict of one possible combination of name:value
pairs chosen from permutations.
"""
# An example of how to use this
#if perm['__consumption_encoder']['maxval'] > 300:
# return False;
#
return True
|
gcd0318/django
|
refs/heads/master
|
django/db/models/base.py
|
61
|
from __future__ import unicode_literals
import copy
import inspect
import warnings
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.core import checks
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldDoesNotExist, FieldError, MultipleObjectsReturned,
ObjectDoesNotExist, ValidationError,
)
from django.db import (
DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY, DatabaseError, connections,
router, transaction,
)
from django.db.models import signals
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.fields import AutoField
from django.db.models.fields.related import (
ForeignObjectRel, ManyToOneRel, OneToOneField, lazy_related_operation,
resolve_relation,
)
from django.db.models.manager import ensure_default_manager
from django.db.models.options import Options
from django.db.models.query import Q
from django.db.models.query_utils import (
DeferredAttribute, deferred_class_factory,
)
from django.db.models.utils import make_model_tuple
from django.utils import six
from django.utils.encoding import force_str, force_text
from django.utils.functional import curry
from django.utils.six.moves import zip
from django.utils.text import capfirst, get_text_list
from django.utils.translation import ugettext_lazy as _
from django.utils.version import get_version
def subclass_exception(name, parents, module, attached_to=None):
"""
Create exception subclass. Used by ModelBase below.
If 'attached_to' is supplied, the exception will be created in a way that
allows it to be pickled, assuming the returned exception class will be added
as an attribute to the 'attached_to' class.
"""
class_dict = {'__module__': module}
if attached_to is not None:
def __reduce__(self):
# Exceptions are special - they've got state that isn't
# in self.__dict__. We assume it is all in self.args.
return (unpickle_inner_exception, (attached_to, name), self.args)
def __setstate__(self, args):
self.args = args
class_dict['__reduce__'] = __reduce__
class_dict['__setstate__'] = __setstate__
return type(name, parents, class_dict)
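# Illustrative (hypothetical) usage of subclass_exception, mirroring what
# ModelBase does below for DoesNotExist and MultipleObjectsReturned:
#
#   exc = subclass_exception(str('DoesNotExist'), (ObjectDoesNotExist,),
#                            __name__, attached_to=SomeModel)
#
# Because __reduce__ dispatches to unpickle_inner_exception with the owning
# class, the result stays picklable even though it is not a module-level name.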
class ModelBase(type):
"""
Metaclass for all models.
"""
def __new__(cls, name, bases, attrs):
super_new = super(ModelBase, cls).__new__
# Also ensure initialization is only performed for subclasses of Model
# (excluding Model class itself).
parents = [b for b in bases if isinstance(b, ModelBase)]
if not parents:
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_class = super_new(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
app_label = None
# Look for an application configuration to attach the model to.
app_config = apps.get_containing_app_config(module)
if getattr(meta, 'app_label', None) is None:
if app_config is None:
if not abstract:
raise RuntimeError(
"Model class %s.%s doesn't declare an explicit "
"app_label and either isn't in an application in "
"INSTALLED_APPS or else was imported before its "
"application was loaded. " % (module, name))
else:
app_label = app_config.label
new_class.add_to_class('_meta', Options(meta, app_label))
if not abstract:
new_class.add_to_class(
'DoesNotExist',
subclass_exception(
str('DoesNotExist'),
tuple(
x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract
) or (ObjectDoesNotExist,),
module,
attached_to=new_class))
new_class.add_to_class(
'MultipleObjectsReturned',
subclass_exception(
str('MultipleObjectsReturned'),
tuple(
x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract
) or (MultipleObjectsReturned,),
module,
attached_to=new_class))
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, 'ordering'):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, 'get_latest_by'):
new_class._meta.get_latest_by = base_meta.get_latest_by
is_proxy = new_class._meta.proxy
# If the model is a proxy, ensure that the base class
# hasn't been swapped out.
if is_proxy and base_meta and base_meta.swapped:
raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped))
if getattr(new_class, '_default_manager', None):
if not is_proxy:
# Multi-table inheritance doesn't inherit default manager from
# parents.
new_class._default_manager = None
new_class._base_manager = None
else:
# Proxy classes do inherit parent's default manager, if none is
# set explicitly.
new_class._default_manager = new_class._default_manager._copy_to_model(new_class)
new_class._base_manager = new_class._base_manager._copy_to_model(new_class)
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# All the fields of any type declared on this model
new_fields = chain(
new_class._meta.local_fields,
new_class._meta.local_many_to_many,
new_class._meta.virtual_fields
)
field_names = {f.name for f in new_fields}
# Basic setup for proxy models.
if is_proxy:
base = None
for parent in [kls for kls in parents if hasattr(kls, '_meta')]:
if parent._meta.abstract:
if parent._meta.fields:
raise TypeError(
"Abstract base class containing model fields not "
"permitted for proxy model '%s'." % name
)
else:
continue
if base is not None:
raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
else:
base = parent
if base is None:
raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
new_class._meta.setup_proxy(base)
new_class._meta.concrete_model = base._meta.concrete_model
base._meta.concrete_model._meta.proxied_children.append(new_class._meta)
else:
new_class._meta.concrete_model = new_class
# Collect the parent links for multi-table inheritance.
parent_links = {}
for base in reversed([new_class] + parents):
# Conceptually equivalent to `if base is Model`.
if not hasattr(base, '_meta'):
continue
# Skip concrete parent classes.
if base != new_class and not base._meta.abstract:
continue
# Locate OneToOneField instances.
for field in base._meta.local_fields:
if isinstance(field, OneToOneField):
related = resolve_relation(new_class, field.remote_field.model)
parent_links[make_model_tuple(related)] = field
# Do the appropriate setup for any model parents.
for base in parents:
original_base = base
if not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
continue
parent_fields = base._meta.local_fields + base._meta.local_many_to_many
# Check for clashes between locally declared fields and those
# on the base classes (we cannot handle shadowed fields at the
# moment).
for field in parent_fields:
if field.name in field_names:
raise FieldError(
'Local field %r in class %r clashes '
'with field of similar name from '
'base class %r' % (field.name, name, base.__name__)
)
if not base._meta.abstract:
# Concrete classes...
base = base._meta.concrete_model
base_key = make_model_tuple(base)
if base_key in parent_links:
field = parent_links[base_key]
elif not is_proxy:
attr_name = '%s_ptr' % base._meta.model_name
field = OneToOneField(base, name=attr_name,
auto_created=True, parent_link=True)
# Only add the ptr field if it's not already present;
# e.g. migrations will already have it specified
if not hasattr(new_class, attr_name):
new_class.add_to_class(attr_name, field)
else:
field = None
new_class._meta.parents[base] = field
else:
# .. and abstract ones.
for field in parent_fields:
new_field = copy.deepcopy(field)
new_class.add_to_class(field.name, new_field)
# Pass any non-abstract parent classes onto child.
new_class._meta.parents.update(base._meta.parents)
# Inherit managers from the abstract base classes.
new_class.copy_managers(base._meta.abstract_managers)
# Proxy models inherit the non-abstract managers from their base,
# unless they have redefined any of them.
if is_proxy:
new_class.copy_managers(original_base._meta.concrete_managers)
# Inherit virtual fields (like GenericForeignKey) from the parent
# class
for field in base._meta.virtual_fields:
if base._meta.abstract and field.name in field_names:
raise FieldError(
'Local field %r in class %r clashes '
'with field of similar name from '
'abstract base class %r' % (field.name, name, base.__name__)
)
new_class.add_to_class(field.name, copy.deepcopy(field))
if abstract:
# Abstract base models can't be instantiated and don't appear in
# the list of models for an app. We do the final setup for them a
# little differently from normal models.
attr_meta.abstract = False
new_class.Meta = attr_meta
return new_class
new_class._prepare()
new_class._meta.apps.register_model(new_class._meta.app_label, new_class)
return new_class
def copy_managers(cls, base_managers):
# This is in-place sorting of an Options attribute, but that's fine.
base_managers.sort()
for _, mgr_name, manager in base_managers: # NOQA (redefinition of _)
val = getattr(cls, mgr_name, None)
if not val or val is manager:
new_manager = manager._copy_to_model(cls)
cls.add_to_class(mgr_name, new_manager)
def add_to_class(cls, name, value):
# We should call the contribute_to_class method only if it's bound
if not inspect.isclass(value) and hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
def _prepare(cls):
"""
Creates some methods once self._meta has been populated.
"""
opts = cls._meta
opts._prepare(cls)
if opts.order_with_respect_to:
cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
# defer creating accessors on the foreign class until we are
# certain it has been created
def make_foreign_order_accessors(cls, model, field):
setattr(
field.remote_field.model,
'get_%s_order' % cls.__name__.lower(),
curry(method_get_order, cls)
)
setattr(
field.remote_field.model,
'set_%s_order' % cls.__name__.lower(),
curry(method_set_order, cls)
)
wrt = opts.order_with_respect_to
lazy_related_operation(make_foreign_order_accessors, cls, wrt.remote_field.model, field=wrt)
# Give the class a docstring -- its definition.
if cls.__doc__ is None:
cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join(f.name for f in opts.fields))
get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get(opts.label_lower)
if get_absolute_url_override:
setattr(cls, 'get_absolute_url', get_absolute_url_override)
ensure_default_manager(cls)
signals.class_prepared.send(sender=cls)
class ModelState(object):
"""
A class for storing instance state
"""
def __init__(self, db=None):
self.db = db
# If true, uniqueness validation checks will consider this a new, as-yet-unsaved object.
# Necessary for correct validation of new instances of objects with explicit (non-auto) PKs.
# This impacts validation only; it has no effect on the actual save.
self.adding = True
class Model(six.with_metaclass(ModelBase)):
_deferred = False
def __init__(self, *args, **kwargs):
signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)
# Set up the storage for instance state
self._state = ModelState()
# There is a rather weird disparity here; if kwargs, it's set, then args
        # overrides it. It should be one or the other; don't duplicate the work.
# The reason for the kwargs check is that standard iterator passes in by
# args, and instantiation for iteration is 33% faster.
args_len = len(args)
if args_len > len(self._meta.concrete_fields):
# Daft, but matches old exception sans the err msg.
raise IndexError("Number of args exceeds number of fields")
if not kwargs:
fields_iter = iter(self._meta.concrete_fields)
# The ordering of the zip calls matter - zip throws StopIteration
# when an iter throws it. So if the first iter throws it, the second
# is *not* consumed. We rely on this, so don't change the order
# without changing the logic.
for val, field in zip(args, fields_iter):
setattr(self, field.attname, val)
else:
# Slower, kwargs-ready version.
fields_iter = iter(self._meta.fields)
for val, field in zip(args, fields_iter):
setattr(self, field.attname, val)
kwargs.pop(field.name, None)
# Maintain compatibility with existing calls.
if isinstance(field.remote_field, ManyToOneRel):
kwargs.pop(field.attname, None)
# Now we're left with the unprocessed fields that *must* come from
# keywords, or default.
for field in fields_iter:
is_related_object = False
# This slightly odd construct is so that we can access any
# data-descriptor object (DeferredAttribute) without triggering its
# __get__ method.
if (field.attname not in kwargs and
(isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute)
or field.column is None)):
# This field will be populated on request.
continue
if kwargs:
if isinstance(field.remote_field, ForeignObjectRel):
try:
# Assume object instance was passed in.
rel_obj = kwargs.pop(field.name)
is_related_object = True
except KeyError:
try:
# Object instance wasn't passed in -- must be an ID.
val = kwargs.pop(field.attname)
except KeyError:
val = field.get_default()
else:
# Object instance was passed in. Special case: You can
# pass in "None" for related objects if it's allowed.
if rel_obj is None and field.null:
val = None
else:
try:
val = kwargs.pop(field.attname)
except KeyError:
# This is done with an exception rather than the
# default argument on pop because we don't want
# get_default() to be evaluated, and then not used.
# Refs #12057.
val = field.get_default()
else:
val = field.get_default()
if is_related_object:
# If we are passed a related instance, set it using the
# field.name instead of field.attname (e.g. "user" instead of
# "user_id") so that the object gets properly cached (and type
# checked) by the RelatedObjectDescriptor.
setattr(self, field.name, rel_obj)
else:
setattr(self, field.attname, val)
if kwargs:
for prop in list(kwargs):
try:
if isinstance(getattr(self.__class__, prop), property):
setattr(self, prop, kwargs.pop(prop))
except AttributeError:
pass
if kwargs:
raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0])
super(Model, self).__init__()
signals.post_init.send(sender=self.__class__, instance=self)
@classmethod
def from_db(cls, db, field_names, values):
if cls._deferred:
new = cls(**dict(zip(field_names, values)))
else:
new = cls(*values)
new._state.adding = False
new._state.db = db
return new
def __repr__(self):
try:
u = six.text_type(self)
except (UnicodeEncodeError, UnicodeDecodeError):
u = '[Bad Unicode data]'
return force_str('<%s: %s>' % (self.__class__.__name__, u))
def __str__(self):
if six.PY2 and hasattr(self, '__unicode__'):
return force_text(self).encode('utf-8')
return '%s object' % self.__class__.__name__
def __eq__(self, other):
if not isinstance(other, Model):
return False
if self._meta.concrete_model != other._meta.concrete_model:
return False
my_pk = self._get_pk_val()
if my_pk is None:
return self is other
return my_pk == other._get_pk_val()
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
if self._get_pk_val() is None:
raise TypeError("Model instances without primary key value are unhashable")
return hash(self._get_pk_val())
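    # Consequences of the __eq__/__hash__ semantics above (illustrative;
    # Person is a hypothetical concrete model, PersonProxy a proxy of it):
    #
    #   Person(pk=1) == PersonProxy(pk=1)  # True: same concrete model and pk
    #   a, b = Person(), Person()
    #   a == b                             # False: unsaved, compared by identity
    #   hash(a)                            # TypeError: no primary key value yet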
def __reduce__(self):
"""
Provides pickling support. Normally, this just dispatches to Python's
standard handling. However, for models with deferred field loading, we
need to do things manually, as they're dynamically created classes and
only module-level classes can be pickled by the default path.
"""
data = self.__dict__
data[DJANGO_VERSION_PICKLE_KEY] = get_version()
if not self._deferred:
class_id = self._meta.app_label, self._meta.object_name
return model_unpickle, (class_id, [], simple_class_factory), data
defers = []
for field in self._meta.fields:
if isinstance(self.__class__.__dict__.get(field.attname),
DeferredAttribute):
defers.append(field.attname)
model = self._meta.proxy_for_model
class_id = model._meta.app_label, model._meta.object_name
return (model_unpickle, (class_id, defers, deferred_class_factory), data)
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = ("Pickled model instance's Django version %s does"
" not match the current version %s."
% (pickled_version, current_version))
else:
msg = "Pickled model instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def _get_pk_val(self, meta=None):
if not meta:
meta = self._meta
return getattr(self, meta.pk.attname)
def _set_pk_val(self, value):
return setattr(self, self._meta.pk.attname, value)
pk = property(_get_pk_val, _set_pk_val)
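    # The `pk` property therefore aliases whatever field is the primary key,
    # so on a default model `obj.pk` reads and writes `obj.id` (sketch).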
def get_deferred_fields(self):
"""
Returns a set containing names of deferred fields for this instance.
"""
return {
f.attname for f in self._meta.concrete_fields
if isinstance(self.__class__.__dict__.get(f.attname), DeferredAttribute)
}
def refresh_from_db(self, using=None, fields=None, **kwargs):
"""
Reloads field values from the database.
By default, the reloading happens from the database this instance was
loaded from, or by the read router if this instance wasn't loaded from
any database. The using parameter will override the default.
Fields can be used to specify which fields to reload. The fields
should be an iterable of field attnames. If fields is None, then
all non-deferred fields are reloaded.
When accessing deferred fields of an instance, the deferred loading
of the field will call this method.
"""
if fields is not None:
if len(fields) == 0:
return
if any(LOOKUP_SEP in f for f in fields):
raise ValueError(
'Found "%s" in fields argument. Relations and transforms '
'are not allowed in fields.' % LOOKUP_SEP)
db = using if using is not None else self._state.db
if self._deferred:
non_deferred_model = self._meta.proxy_for_model
else:
non_deferred_model = self.__class__
db_instance_qs = non_deferred_model._default_manager.using(db).filter(pk=self.pk)
# Use provided fields, if not set then reload all non-deferred fields.
if fields is not None:
fields = list(fields)
db_instance_qs = db_instance_qs.only(*fields)
elif self._deferred:
deferred_fields = self.get_deferred_fields()
fields = [f.attname for f in self._meta.concrete_fields
if f.attname not in deferred_fields]
db_instance_qs = db_instance_qs.only(*fields)
db_instance = db_instance_qs.get()
non_loaded_fields = db_instance.get_deferred_fields()
for field in self._meta.concrete_fields:
if field.attname in non_loaded_fields:
# This field wasn't refreshed - skip ahead.
continue
setattr(self, field.attname, getattr(db_instance, field.attname))
# Throw away stale foreign key references.
if field.is_relation and field.get_cache_name() in self.__dict__:
rel_instance = getattr(self, field.get_cache_name())
local_val = getattr(db_instance, field.attname)
related_val = None if rel_instance is None else getattr(rel_instance, field.target_field.attname)
if local_val != related_val:
del self.__dict__[field.get_cache_name()]
self._state.db = db_instance._state.db
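    # Illustrative calls matching the docstring above (obj is a hypothetical
    # saved instance and 'replica' a hypothetical database alias):
    #
    #   obj.refresh_from_db()                  # reload all non-deferred fields
    #   obj.refresh_from_db(fields=['name'])   # reload only "name"
    #   obj.refresh_from_db(using='replica')   # read from another database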
def serializable_value(self, field_name):
"""
Returns the value of the field name for this instance. If the field is
a foreign key, returns the id value, instead of the object. If there's
no Field object with this name on the model, the model attribute's
value is returned directly.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
and not use this method.
"""
try:
field = self._meta.get_field(field_name)
except FieldDoesNotExist:
return getattr(self, field_name)
return getattr(self, field.attname)
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
"""
Saves the current instance. Override this in a subclass if you want to
control the saving process.
The 'force_insert' and 'force_update' parameters can be used to insist
that the "save" must be an SQL insert or update (or equivalent for
non-SQL backends), respectively. Normally, they should not be set.
"""
using = using or router.db_for_write(self.__class__, instance=self)
if force_insert and (force_update or update_fields):
raise ValueError("Cannot force both insert and updating in model saving.")
if update_fields is not None:
# If update_fields is empty, skip the save. We do also check for
# no-op saves later on for inheritance cases. This bailout is
# still needed for skipping signal sending.
if len(update_fields) == 0:
return
update_fields = frozenset(update_fields)
field_names = set()
for field in self._meta.fields:
if not field.primary_key:
field_names.add(field.name)
if field.name != field.attname:
field_names.add(field.attname)
non_model_fields = update_fields.difference(field_names)
if non_model_fields:
raise ValueError("The following fields do not exist in this "
"model or are m2m fields: %s"
% ', '.join(non_model_fields))
# If saving to the same database, and this model is deferred, then
# automatically do a "update_fields" save on the loaded fields.
elif not force_insert and self._deferred and using == self._state.db:
field_names = set()
for field in self._meta.concrete_fields:
if not field.primary_key and not hasattr(field, 'through'):
field_names.add(field.attname)
deferred_fields = [
f.attname for f in self._meta.fields
if (f.attname not in self.__dict__ and
isinstance(self.__class__.__dict__[f.attname], DeferredAttribute))
]
loaded_fields = field_names.difference(deferred_fields)
if loaded_fields:
update_fields = frozenset(loaded_fields)
self.save_base(using=using, force_insert=force_insert,
force_update=force_update, update_fields=update_fields)
save.alters_data = True
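    # A sketch of the save() contract documented above (obj is hypothetical):
    #
    #   obj.save()                         # UPDATE if possible, else INSERT
    #   obj.save(force_insert=True)        # always INSERT
    #   obj.save(update_fields=['name'])   # UPDATE only the "name" column
    #   obj.save(force_insert=True, force_update=True)  # raises ValueError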
def save_base(self, raw=False, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Handles the parts of saving which should be done only once per save,
yet need to be done in raw saves, too. This includes some sanity
checks and signal sending.
        The 'raw' argument tells save_base not to save any parent
        models and not to change any values before the save. This
        is used by fixture loading.
"""
using = using or router.db_for_write(self.__class__, instance=self)
assert not (force_insert and (force_update or update_fields))
assert update_fields is None or len(update_fields) > 0
cls = origin = self.__class__
# Skip proxies, but keep the origin as the proxy model.
if cls._meta.proxy:
cls = cls._meta.concrete_model
meta = cls._meta
if not meta.auto_created:
signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using,
update_fields=update_fields)
with transaction.atomic(using=using, savepoint=False):
if not raw:
self._save_parents(cls, using, update_fields)
updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields)
# Store the database on which the object was saved
self._state.db = using
# Once saved, this is no longer a to-be-added instance.
self._state.adding = False
# Signal that the save is complete
if not meta.auto_created:
signals.post_save.send(sender=origin, instance=self, created=(not updated),
update_fields=update_fields, raw=raw, using=using)
save_base.alters_data = True
def _save_parents(self, cls, using, update_fields):
"""
Saves all the parents of cls using values from self.
"""
meta = cls._meta
for parent, field in meta.parents.items():
# Make sure the link fields are synced between parent and self.
if (field and getattr(self, parent._meta.pk.attname) is None
and getattr(self, field.attname) is not None):
setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
self._save_parents(cls=parent, using=using, update_fields=update_fields)
self._save_table(cls=parent, using=using, update_fields=update_fields)
# Set the parent's PK value to self.
if field:
setattr(self, field.attname, self._get_pk_val(parent._meta))
                # Since we didn't have an instance of the parent handy, we
                # set the attname directly, bypassing the descriptor. Invalidate
# the related object cache, in case it's been accidentally
# populated. A fresh instance will be re-built from the
# database if necessary.
cache_name = field.get_cache_name()
if hasattr(self, cache_name):
delattr(self, cache_name)
def _save_table(self, raw=False, cls=None, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Does the heavy-lifting involved in saving. Updates or inserts the data
for a single table.
"""
meta = cls._meta
non_pks = [f for f in meta.local_concrete_fields if not f.primary_key]
if update_fields:
non_pks = [f for f in non_pks
if f.name in update_fields or f.attname in update_fields]
pk_val = self._get_pk_val(meta)
if pk_val is None:
pk_val = meta.pk.get_pk_value_on_save(self)
setattr(self, meta.pk.attname, pk_val)
pk_set = pk_val is not None
if not pk_set and (force_update or update_fields):
raise ValueError("Cannot force an update in save() with no primary key.")
updated = False
# If possible, try an UPDATE. If that doesn't update anything, do an INSERT.
if pk_set and not force_insert:
base_qs = cls._base_manager.using(using)
values = [(f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False)))
for f in non_pks]
forced_update = update_fields or force_update
updated = self._do_update(base_qs, using, pk_val, values, update_fields,
forced_update)
if force_update and not updated:
raise DatabaseError("Forced update did not affect any rows.")
if update_fields and not updated:
raise DatabaseError("Save with update_fields did not affect any rows.")
if not updated:
if meta.order_with_respect_to:
# If this is a model with an order_with_respect_to
# autopopulate the _order field
field = meta.order_with_respect_to
order_value = cls._base_manager.using(using).filter(
**{field.name: getattr(self, field.attname)}).count()
self._order = order_value
fields = meta.local_concrete_fields
if not pk_set:
fields = [f for f in fields if not isinstance(f, AutoField)]
update_pk = bool(meta.has_auto_field and not pk_set)
result = self._do_insert(cls._base_manager, using, fields, update_pk, raw)
if update_pk:
setattr(self, meta.pk.attname, result)
return updated
def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update):
"""
This method will try to update the model. If the model was updated (in
the sense that an update query was done and a matching row was found
from the DB) the method will return True.
"""
filtered = base_qs.filter(pk=pk_val)
if not values:
# We can end up here when saving a model in inheritance chain where
# update_fields doesn't target any field in current model. In that
# case we just say the update succeeded. Another case ending up here
# is a model with just PK - in that case check that the PK still
# exists.
return update_fields is not None or filtered.exists()
if self._meta.select_on_save and not forced_update:
if filtered.exists():
# It may happen that the object is deleted from the DB right after
# this check, causing the subsequent UPDATE to return zero matching
# rows. The same result can occur in some rare cases when the
# database returns zero despite the UPDATE being executed
# successfully (a row is matched and updated). In order to
# distinguish these two cases, the object's existence in the
# database is again checked for if the UPDATE query returns 0.
return filtered._update(values) > 0 or filtered.exists()
else:
return False
return filtered._update(values) > 0
def _do_insert(self, manager, using, fields, update_pk, raw):
"""
Do an INSERT. If update_pk is defined then this method should return
the new pk for the model.
"""
return manager._insert([self], fields=fields, return_id=update_pk,
using=using, raw=raw)
def delete(self, using=None, keep_parents=False):
using = using or router.db_for_write(self.__class__, instance=self)
assert self._get_pk_val() is not None, (
"%s object can't be deleted because its %s attribute is set to None." %
(self._meta.object_name, self._meta.pk.attname)
)
collector = Collector(using=using)
collector.collect([self], keep_parents=keep_parents)
return collector.delete()
delete.alters_data = True
def _get_FIELD_display(self, field):
value = getattr(self, field.attname)
return force_text(dict(field.flatchoices).get(value, value), strings_only=True)
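    # _get_FIELD_display is curried per choices field by Field.contribute_to_class,
    # so e.g. status = CharField(choices=[('d', 'Draft')]) gives instances a
    # get_status_display() resolving 'd' -> 'Draft' via flatchoices (the field
    # name here is illustrative).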
def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
if not self.pk:
raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
op = 'gt' if is_next else 'lt'
order = '' if is_next else '-'
param = force_text(getattr(self, field.attname))
q = Q(**{'%s__%s' % (field.name, op): param})
q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk})
qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by(
'%s%s' % (order, field.name), '%spk' % order
)
try:
return qs[0]
except IndexError:
raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
def _get_next_or_previous_in_order(self, is_next):
cachename = "__%s_order_cache" % is_next
if not hasattr(self, cachename):
op = 'gt' if is_next else 'lt'
order = '_order' if is_next else '-_order'
order_field = self._meta.order_with_respect_to
obj = self._default_manager.filter(**{
order_field.name: getattr(self, order_field.attname)
}).filter(**{
'_order__%s' % op: self._default_manager.values('_order').filter(**{
self._meta.pk.name: self.pk
})
}).order_by(order)[:1].get()
setattr(self, cachename, obj)
return getattr(self, cachename)
def prepare_database_save(self, field):
if self.pk is None:
raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self)
return getattr(self, field.remote_field.get_related_field().attname)
def clean(self):
"""
Hook for doing any extra model-wide validation after clean() has been
called on every field by self.clean_fields. Any ValidationError raised
by this method will not be associated with a particular field; it will
have a special-case association with the field defined by NON_FIELD_ERRORS.
"""
pass
def validate_unique(self, exclude=None):
"""
Checks unique constraints on the model and raises ``ValidationError``
if any failed.
"""
unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
errors = self._perform_unique_checks(unique_checks)
date_errors = self._perform_date_checks(date_checks)
for k, v in date_errors.items():
errors.setdefault(k, []).extend(v)
if errors:
raise ValidationError(errors)
def _get_unique_checks(self, exclude=None):
"""
Gather a list of checks to perform. Since validate_unique could be
called from a ModelForm, some fields may have been excluded; we can't
perform a unique check on a model that is missing fields involved
in that check.
Fields that did not validate should also be excluded, but they need
to be passed in via the exclude argument.
"""
if exclude is None:
exclude = []
unique_checks = []
unique_togethers = [(self.__class__, self._meta.unique_together)]
for parent_class in self._meta.get_parent_list():
if parent_class._meta.unique_together:
unique_togethers.append((parent_class, parent_class._meta.unique_together))
for model_class, unique_together in unique_togethers:
for check in unique_together:
for name in check:
# If this is an excluded field, don't add this check.
if name in exclude:
break
else:
unique_checks.append((model_class, tuple(check)))
# These are checks for the unique_for_<date/year/month>.
date_checks = []
# Gather a list of checks for fields declared as unique and add them to
# the list of checks.
fields_with_class = [(self.__class__, self._meta.local_fields)]
for parent_class in self._meta.get_parent_list():
fields_with_class.append((parent_class, parent_class._meta.local_fields))
for model_class, fields in fields_with_class:
for f in fields:
name = f.name
if name in exclude:
continue
if f.unique:
unique_checks.append((model_class, (name,)))
if f.unique_for_date and f.unique_for_date not in exclude:
date_checks.append((model_class, 'date', name, f.unique_for_date))
if f.unique_for_year and f.unique_for_year not in exclude:
date_checks.append((model_class, 'year', name, f.unique_for_year))
if f.unique_for_month and f.unique_for_month not in exclude:
date_checks.append((model_class, 'month', name, f.unique_for_month))
return unique_checks, date_checks
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
# Try to look up an existing object with the same values as this
            # object's values for all the fields in the unique check.
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
if lookup_value is None:
# no value, skip the lookup
continue
if f.primary_key and not self._state.adding:
# no need to check for unique primary key when editing
continue
lookup_kwargs[str(field_name)] = lookup_value
# some fields were skipped, no reason to do the check
if len(unique_check) != len(lookup_kwargs):
continue
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
# Note that we need to use the pk as defined by model_class, not
# self.pk. These can be different fields because model inheritance
            # allows a single model to have effectively multiple primary keys.
# Refs #17615.
model_class_pk = self._get_pk_val(model_class._meta)
if not self._state.adding and model_class_pk is not None:
qs = qs.exclude(pk=model_class_pk)
if qs.exists():
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
return errors
def _perform_date_checks(self, date_checks):
errors = {}
for model_class, lookup_type, field, unique_for in date_checks:
lookup_kwargs = {}
# there's a ticket to add a date lookup, we can remove this special
            # case if that makes its way in
date = getattr(self, unique_for)
if date is None:
continue
if lookup_type == 'date':
lookup_kwargs['%s__day' % unique_for] = date.day
lookup_kwargs['%s__month' % unique_for] = date.month
lookup_kwargs['%s__year' % unique_for] = date.year
else:
lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
lookup_kwargs[field] = getattr(self, field)
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
if not self._state.adding and self.pk is not None:
qs = qs.exclude(pk=self.pk)
if qs.exists():
errors.setdefault(field, []).append(
self.date_error_message(lookup_type, field, unique_for)
)
return errors
def date_error_message(self, lookup_type, field_name, unique_for):
opts = self._meta
field = opts.get_field(field_name)
return ValidationError(
message=field.error_messages['unique_for_date'],
code='unique_for_date',
params={
'model': self,
'model_name': six.text_type(capfirst(opts.verbose_name)),
'lookup_type': lookup_type,
'field': field_name,
'field_label': six.text_type(capfirst(field.verbose_name)),
'date_field': unique_for,
'date_field_label': six.text_type(capfirst(opts.get_field(unique_for).verbose_name)),
}
)
def unique_error_message(self, model_class, unique_check):
opts = model_class._meta
params = {
'model': self,
'model_class': model_class,
'model_name': six.text_type(capfirst(opts.verbose_name)),
'unique_check': unique_check,
}
# A unique field
if len(unique_check) == 1:
field = opts.get_field(unique_check[0])
params['field_label'] = six.text_type(capfirst(field.verbose_name))
return ValidationError(
message=field.error_messages['unique'],
code='unique',
params=params,
)
# unique_together
else:
field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]
params['field_labels'] = six.text_type(get_text_list(field_labels, _('and')))
return ValidationError(
message=_("%(model_name)s with this %(field_labels)s already exists."),
code='unique_together',
params=params,
)
def full_clean(self, exclude=None, validate_unique=True):
"""
Calls clean_fields, clean, and validate_unique, on the model,
and raises a ``ValidationError`` for any errors that occurred.
"""
errors = {}
if exclude is None:
exclude = []
else:
exclude = list(exclude)
try:
self.clean_fields(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
# Form.clean() is run even if other validation fails, so do the
# same with Model.clean() for consistency.
try:
self.clean()
except ValidationError as e:
errors = e.update_error_dict(errors)
# Run unique checks, but only for fields that passed validation.
if validate_unique:
for name in errors.keys():
if name != NON_FIELD_ERRORS and name not in exclude:
exclude.append(name)
try:
self.validate_unique(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
if errors:
raise ValidationError(errors)
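    # Typical call pattern implied by the docstring above (a sketch; obj and
    # the excluded field name are hypothetical):
    #
    #   try:
    #       obj.full_clean(exclude=['slug'])
    #   except ValidationError as e:
    #       errors = e.message_dict  # field name -> list of error messages
    #   else:
    #       obj.save()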
def clean_fields(self, exclude=None):
"""
Cleans all fields and raises a ValidationError containing a dict
of all validation errors if any occur.
"""
if exclude is None:
exclude = []
errors = {}
for f in self._meta.fields:
if f.name in exclude:
continue
# Skip validation for empty fields with blank=True. The developer
# is responsible for making sure they have a valid value.
raw_value = getattr(self, f.attname)
if f.blank and raw_value in f.empty_values:
continue
try:
setattr(self, f.attname, f.clean(raw_value, self))
except ValidationError as e:
errors[f.name] = e.error_list
if errors:
raise ValidationError(errors)
@classmethod
def check(cls, **kwargs):
errors = []
errors.extend(cls._check_swappable())
errors.extend(cls._check_model())
errors.extend(cls._check_managers(**kwargs))
if not cls._meta.swapped:
errors.extend(cls._check_fields(**kwargs))
errors.extend(cls._check_m2m_through_same_relationship())
errors.extend(cls._check_long_column_names())
clash_errors = cls._check_id_field() + cls._check_field_name_clashes()
errors.extend(clash_errors)
# If there are field name clashes, hide consequent column name
# clashes.
if not clash_errors:
errors.extend(cls._check_column_name_clashes())
errors.extend(cls._check_index_together())
errors.extend(cls._check_unique_together())
errors.extend(cls._check_ordering())
return errors
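    # These checks back the system-check framework (normally run via
    # `manage.py check`); a sketch of programmatic use, SomeModel hypothetical:
    #
    #   for error in SomeModel.check():
    #       print(error.id, error.msg)  # e.g. 'models.E004', "'id' can only ..."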
@classmethod
def _check_swappable(cls):
""" Check if the swapped model exists. """
errors = []
if cls._meta.swapped:
try:
apps.get_model(cls._meta.swapped)
except ValueError:
errors.append(
checks.Error(
"'%s' is not of the form 'app_label.app_name'." % cls._meta.swappable,
hint=None,
obj=None,
id='models.E001',
)
)
except LookupError:
app_label, model_name = cls._meta.swapped.split('.')
errors.append(
checks.Error(
"'%s' references '%s.%s', which has not been "
"installed, or is abstract." % (
cls._meta.swappable, app_label, model_name
),
hint=None,
obj=None,
id='models.E002',
)
)
return errors
@classmethod
def _check_model(cls):
errors = []
if cls._meta.proxy:
if cls._meta.local_fields or cls._meta.local_many_to_many:
errors.append(
checks.Error(
"Proxy model '%s' contains model fields." % cls.__name__,
hint=None,
obj=None,
id='models.E017',
)
)
return errors
@classmethod
def _check_managers(cls, **kwargs):
""" Perform all manager checks. """
errors = []
for __, manager, __ in cls._meta.managers:
errors.extend(manager.check(**kwargs))
return errors
@classmethod
def _check_fields(cls, **kwargs):
""" Perform all field checks. """
errors = []
for field in cls._meta.local_fields:
errors.extend(field.check(**kwargs))
for field in cls._meta.local_many_to_many:
errors.extend(field.check(from_model=cls, **kwargs))
return errors
@classmethod
def _check_m2m_through_same_relationship(cls):
""" Check if no relationship model is used by more than one m2m field.
"""
errors = []
seen_intermediary_signatures = []
fields = cls._meta.local_many_to_many
# Skip when the target model wasn't found.
fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase))
# Skip when the relationship model wasn't found.
fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase))
for f in fields:
signature = (f.remote_field.model, cls, f.remote_field.through)
if signature in seen_intermediary_signatures:
errors.append(
checks.Error(
"The model has two many-to-many relations through "
"the intermediate model '%s'." % f.remote_field.through._meta.label,
hint=None,
obj=cls,
id='models.E003',
)
)
else:
seen_intermediary_signatures.append(signature)
return errors
@classmethod
def _check_id_field(cls):
""" Check if `id` field is a primary key. """
fields = list(f for f in cls._meta.local_fields
if f.name == 'id' and f != cls._meta.pk)
# fields is empty or consists of the invalid "id" field
if fields and not fields[0].primary_key and cls._meta.pk.name == 'id':
return [
checks.Error(
"'id' can only be used as a field name if the field also "
"sets 'primary_key=True'.",
hint=None,
obj=cls,
id='models.E004',
)
]
else:
return []
@classmethod
def _check_field_name_clashes(cls):
""" Ref #17673. """
errors = []
used_fields = {} # name or attname -> field
# Check that multi-inheritance doesn't cause field name shadowing.
for parent in cls._meta.get_parent_list():
for f in parent._meta.local_fields:
clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
if clash:
errors.append(
checks.Error(
"The field '%s' from parent model "
"'%s' clashes with the field '%s' "
"from parent model '%s'." % (
clash.name, clash.model._meta,
f.name, f.model._meta
),
hint=None,
obj=cls,
id='models.E005',
)
)
used_fields[f.name] = f
used_fields[f.attname] = f
# Check that fields defined in the model don't clash with fields from
# parents.
for f in cls._meta.local_fields:
clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
# Note that we may detect clash between user-defined non-unique
# field "id" and automatically added unique field "id", both
# defined at the same model. This special case is considered in
# _check_id_field and here we ignore it.
id_conflict = (f.name == "id" and
clash and clash.name == "id" and clash.model == cls)
if clash and not id_conflict:
errors.append(
checks.Error(
"The field '%s' clashes with the field '%s' "
"from model '%s'." % (
f.name, clash.name, clash.model._meta
),
hint=None,
obj=f,
id='models.E006',
)
)
used_fields[f.name] = f
used_fields[f.attname] = f
return errors
@classmethod
def _check_column_name_clashes(cls):
# Store a list of column names which have already been used by other fields.
used_column_names = []
errors = []
for f in cls._meta.local_fields:
_, column_name = f.get_attname_column()
# Ensure the column name is not already in use.
if column_name and column_name in used_column_names:
errors.append(
checks.Error(
"Field '%s' has column name '%s' that is used by "
"another field." % (f.name, column_name),
hint="Specify a 'db_column' for the field.",
obj=cls,
id='models.E007'
)
)
else:
used_column_names.append(column_name)
return errors
@classmethod
def _check_index_together(cls):
""" Check the value of "index_together" option. """
if not isinstance(cls._meta.index_together, (tuple, list)):
return [
checks.Error(
"'index_together' must be a list or tuple.",
hint=None,
obj=cls,
id='models.E008',
)
]
elif any(not isinstance(fields, (tuple, list))
for fields in cls._meta.index_together):
return [
checks.Error(
"All 'index_together' elements must be lists or tuples.",
hint=None,
obj=cls,
id='models.E009',
)
]
else:
errors = []
for fields in cls._meta.index_together:
errors.extend(cls._check_local_fields(fields, "index_together"))
return errors
@classmethod
def _check_unique_together(cls):
""" Check the value of "unique_together" option. """
if not isinstance(cls._meta.unique_together, (tuple, list)):
return [
checks.Error(
"'unique_together' must be a list or tuple.",
hint=None,
obj=cls,
id='models.E010',
)
]
elif any(not isinstance(fields, (tuple, list))
for fields in cls._meta.unique_together):
return [
checks.Error(
"All 'unique_together' elements must be lists or tuples.",
hint=None,
obj=cls,
id='models.E011',
)
]
else:
errors = []
for fields in cls._meta.unique_together:
errors.extend(cls._check_local_fields(fields, "unique_together"))
return errors
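    # Illustrative sketch (hypothetical Meta options): the shape checks above
    # accept
    #     unique_together = [('driver', 'restaurant')]
    # but report a bare string such as
    #     unique_together = 'driver'   # models.E010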
@classmethod
def _check_local_fields(cls, fields, option):
from django.db import models
# In order to avoid hitting the relation tree prematurely, we use our
# own fields_map instead of using get_field()
forward_fields_map = {
field.name: field for field in cls._meta._get_fields(reverse=False)
}
errors = []
for field_name in fields:
try:
field = forward_fields_map[field_name]
except KeyError:
errors.append(
checks.Error(
"'%s' refers to the non-existent field '%s'." % (
option, field_name,
),
hint=None,
obj=cls,
id='models.E012',
)
)
else:
if isinstance(field.remote_field, models.ManyToManyRel):
errors.append(
checks.Error(
"'%s' refers to a ManyToManyField '%s', but "
"ManyToManyFields are not permitted in '%s'." % (
option, field_name, option,
),
hint=None,
obj=cls,
id='models.E013',
)
)
elif field not in cls._meta.local_fields:
errors.append(
checks.Error(
("'%s' refers to field '%s' which is not local "
"to model '%s'.") % (
option, field_name, cls._meta.object_name,
),
hint=("This issue may be caused by multi-table "
"inheritance."),
obj=cls,
id='models.E016',
)
)
return errors
@classmethod
def _check_ordering(cls):
""" Check "ordering" option -- is it a list of strings and do all fields
exist? """
if cls._meta._ordering_clash:
return [
checks.Error(
"'ordering' and 'order_with_respect_to' cannot be used together.",
hint=None,
obj=cls,
id='models.E021',
),
]
if cls._meta.order_with_respect_to or not cls._meta.ordering:
return []
if not isinstance(cls._meta.ordering, (list, tuple)):
return [
checks.Error(
("'ordering' must be a tuple or list "
"(even if you want to order by only one field)."),
hint=None,
obj=cls,
id='models.E014',
)
]
errors = []
fields = cls._meta.ordering
# Skip '?' fields.
fields = (f for f in fields if f != '?')
# Convert "-field" to "field".
fields = ((f[1:] if f.startswith('-') else f) for f in fields)
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
fields = (f for f in fields if '__' not in f)
# Skip ordering on pk. This is always a valid order_by field
# but is an alias and therefore won't be found by opts.get_field.
fields = {f for f in fields if f != 'pk'}
# Check for invalid or non-existent fields in ordering.
invalid_fields = []
# Any field name that is not present in field_names does not exist.
# Also, ordering by m2m fields is not allowed.
opts = cls._meta
valid_fields = set(chain.from_iterable(
(f.name, f.attname) if not (f.auto_created and not f.concrete) else (f.field.related_query_name(),)
for f in chain(opts.fields, opts.related_objects)
))
invalid_fields.extend(fields - valid_fields)
for invalid_field in invalid_fields:
errors.append(
checks.Error(
"'ordering' refers to the non-existent field '%s'." % invalid_field,
hint=None,
obj=cls,
id='models.E015',
)
)
return errors
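    # Illustrative sketch (hypothetical Meta options): after the filtering
    # above,
    #     ordering = ['-created', 'pk', '?']
    # passes, while ordering = ['nonexistent'] is reported as models.E015.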
@classmethod
def _check_long_column_names(cls):
"""
Check that any auto-generated column names are shorter than the limits
for each database in which the model will be created.
"""
errors = []
allowed_len = None
db_alias = None
# Find the minimum max allowed length among all specified db_aliases.
        for db in settings.DATABASES:
# skip databases where the model won't be created
if not router.allow_migrate_model(db, cls):
continue
connection = connections[db]
max_name_length = connection.ops.max_name_length()
            if max_name_length is None or connection.features.truncates_names:
                continue
            if allowed_len is None or max_name_length < allowed_len:
                allowed_len = max_name_length
                db_alias = db
if allowed_len is None:
return errors
for f in cls._meta.local_fields:
_, column_name = f.get_attname_column()
# Check if auto-generated name for the field is too long
# for the database.
if (f.db_column is None and column_name is not None
and len(column_name) > allowed_len):
errors.append(
checks.Error(
'Autogenerated column name too long for field "%s". '
'Maximum length is "%s" for database "%s".'
% (column_name, allowed_len, db_alias),
hint="Set the column name manually using 'db_column'.",
obj=cls,
id='models.E018',
)
)
for f in cls._meta.local_many_to_many:
# Check if auto-generated name for the M2M field is too long
# for the database.
for m2m in f.remote_field.through._meta.local_fields:
_, rel_name = m2m.get_attname_column()
if (m2m.db_column is None and rel_name is not None
and len(rel_name) > allowed_len):
errors.append(
checks.Error(
'Autogenerated column name too long for M2M field '
'"%s". Maximum length is "%s" for database "%s".'
% (rel_name, allowed_len, db_alias),
hint=("Use 'through' to create a separate model "
"for M2M and then set column_name using "
"'db_column'."),
obj=cls,
id='models.E019',
)
)
return errors
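    # Illustrative sketch (assuming a 30-character backend limit, as on
    # Oracle): an auto-generated column name such as
    #     a_really_long_descriptive_attribute_name = models.IntegerField()
    # exceeds allowed_len and yields models.E018; setting db_column='short'
    # silences the check, as the hint suggests.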
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(ordered_obj, self, id_list, using=None):
if using is None:
using = DEFAULT_DB_ALIAS
rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.remote_field.field_name)
order_name = ordered_obj._meta.order_with_respect_to.name
# FIXME: It would be nice if there was an "update many" version of update
# for situations like this.
with transaction.atomic(using=using, savepoint=False):
for i, j in enumerate(id_list):
ordered_obj.objects.filter(**{'pk': j, order_name: rel_val}).update(_order=i)
def method_get_order(ordered_obj, self):
rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.remote_field.field_name)
order_name = ordered_obj._meta.order_with_respect_to.name
pk_name = ordered_obj._meta.pk.name
return [r[pk_name] for r in
ordered_obj.objects.filter(**{order_name: rel_val}).values(pk_name)]
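# Illustrative sketch (hypothetical models): given
#     class Answer(models.Model):
#         question = models.ForeignKey('Question', models.CASCADE)
#         class Meta:
#             order_with_respect_to = 'question'
# the two helpers above are curried onto Question instances as
# question.get_answer_order() and question.set_answer_order(id_list).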
########
# MISC #
########
def simple_class_factory(model, attrs):
"""
Needed for dynamic classes.
"""
return model
def model_unpickle(model_id, attrs, factory):
"""
Used to unpickle Model subclasses with deferred fields.
"""
if isinstance(model_id, tuple):
if not apps.ready:
apps.populate(settings.INSTALLED_APPS)
model = apps.get_model(*model_id)
else:
# Backwards compat - the model was cached directly in earlier versions.
model = model_id
cls = factory(model, attrs)
return cls.__new__(cls)
model_unpickle.__safe_for_unpickle__ = True
def unpickle_inner_exception(klass, exception_name):
# Get the exception class from the class it is attached to:
exception = getattr(klass, exception_name)
return exception.__new__(exception)
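# Illustrative sketch (hypothetical model class): this hook supports pickling
# of exceptions attached to models, e.g.
#     import pickle
#     exc = pickle.loads(pickle.dumps(MyModel.DoesNotExist()))
# where MyModel.DoesNotExist.__reduce__ points back to this function.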
|
Ted1993/Flasky
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/pulldom.py
|
1729
|
from __future__ import absolute_import, division, unicode_literals
from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \
COMMENT, IGNORABLE_WHITESPACE, CHARACTERS
from . import _base
from ..constants import voidElements
class TreeWalker(_base.TreeWalker):
def __iter__(self):
ignore_until = None
previous = None
for event in self.tree:
if previous is not None and \
(ignore_until is None or previous[1] is ignore_until):
if previous[1] is ignore_until:
ignore_until = None
for token in self.tokens(previous, event):
yield token
if token["type"] == "EmptyTag":
ignore_until = previous[1]
previous = event
        if previous is not None and \
                (ignore_until is None or previous[1] is ignore_until):
for token in self.tokens(previous, None):
yield token
elif ignore_until is not None:
raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")
def tokens(self, event, next):
type, node = event
if type == START_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
attrs = {}
for attr in list(node.attributes.keys()):
attr = node.getAttributeNode(attr)
attrs[(attr.namespaceURI, attr.localName)] = attr.value
if name in voidElements:
for token in self.emptyTag(namespace,
name,
attrs,
not next or next[1] is not node):
yield token
else:
yield self.startTag(namespace, name, attrs)
elif type == END_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
if name not in voidElements:
yield self.endTag(namespace, name)
elif type == COMMENT:
yield self.comment(node.nodeValue)
elif type in (IGNORABLE_WHITESPACE, CHARACTERS):
for token in self.text(node.nodeValue):
yield token
else:
yield self.unknown(type)
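# Illustrative usage sketch (assumed html5lib package layout): feed a pulldom
# event stream to this walker, e.g.
#     from xml.dom import pulldom
#     from html5lib.treewalkers.pulldom import TreeWalker
#     for token in TreeWalker(pulldom.parseString("<p>hi<br/></p>")):
#         print(token["type"])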
|
Bismarrck/tensorflow
|
refs/heads/master
|
tensorflow/contrib/learn/python/learn/monitored_session.py
|
42
|
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A wrapper of Session API which runs hooks (deprecated).
These are deprecated aliases for classes and functions in `tf.train`. Please use
those directly.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.training import monitored_session
# pylint: disable=invalid-name
Scaffold = monitored_session.Scaffold
SessionCreator = monitored_session.SessionCreator
ChiefSessionCreator = monitored_session.ChiefSessionCreator
WorkerSessionCreator = monitored_session.WorkerSessionCreator
MonitoredSession = monitored_session.MonitoredSession
# pylint: enable=invalid-name
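# Illustrative sketch (assumed TF 1.x API, per the deprecation note above):
# prefer the tf.train names directly, e.g.
#     import tensorflow as tf
#     with tf.train.MonitoredSession() as sess:
#         sess.run(train_op)  # train_op: a hypothetical training op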
|
maisim/django-localflavor
|
refs/heads/master
|
tests/test_si.py
|
7
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import date
from django.test import SimpleTestCase
from localflavor.si.forms import (SIEMSOField, SIPhoneNumberField, SIPostalCodeField, SIPostalCodeSelect,
SITaxNumberField)
class SILocalFlavorTests(SimpleTestCase):
def test_SITaxNumberField(self):
error_invalid = ['Enter a valid tax number in form SIXXXXXXXX']
valid = {
'15012557': '15012557',
'SI15012557': '15012557',
'22111310': '22111310',
}
invalid = {
'22241310': error_invalid,
'15012558': error_invalid,
'1501': error_invalid,
'1501123123123': error_invalid,
'abcdabcd': error_invalid,
'01234579': error_invalid,
}
self.assertFieldOutput(SITaxNumberField, valid, invalid)
def test_SIEMSOField(self):
error_invalid = ['This field should contain exactly 13 digits.']
error_invalid_date = ['The first 7 digits of the EMSO must represent a valid past date.']
error_invalid_chksum = ['The EMSO is not valid.']
valid = {
'0205951500462': '0205951500462',
'2309002500068': '2309002500068',
'1010985500400': '1010985500400',
}
invalid = {
'0205951500463': error_invalid_chksum,
'020': error_invalid,
'020020595150046020595150046': error_invalid,
'aaaabbbbccccd': error_invalid,
'1010985500800': error_invalid_chksum,
'2020095500070': error_invalid_date,
'5050095500078': error_invalid_date,
'1010889500408': error_invalid_date,
}
self.assertFieldOutput(SIEMSOField, valid, invalid)
def test_SIEMSOField_info_dict(self):
valid = {
'0205951500462': {'nationality': 50, 'gender': 'male', 'birthdate': date(1951, 5, 2)},
'2309002504063': {'nationality': 50, 'gender': 'male', 'birthdate': date(2002, 9, 23)},
'1010985505402': {'nationality': 50, 'gender': 'female', 'birthdate': date(1985, 10, 10)},
}
for input, info in valid.items():
f = SIEMSOField()
f.clean(input)
self.assertEqual(f.info, info)
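    # Illustrative sketch mirroring the fixtures above: after a successful
    # clean(), the field exposes the parsed EMSO data, e.g.
    #     f = SIEMSOField()
    #     f.clean('0205951500462')
    #     f.info['birthdate']  # -> date(1951, 5, 2)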
def test_SIPhoneNumberField(self):
error_invalid = ['Enter phone number in form +386XXXXXXXX or 0XXXXXXXX.']
valid = {
'+38640999999': '40999999',
'+3861999999': '1999999',
'0038640999999': '40999999',
'040999999': '40999999',
'01999999': '1999999',
'059099999': '59099999',
'059 09 99 99': '59099999',
'0590/999-99': '59099999',
}
invalid = {
'03861999999': error_invalid,
'3861999999': error_invalid,
}
self.assertFieldOutput(SIPhoneNumberField, valid, invalid)
def test_SIPostalCodeField(self):
valid = {
'4000': '4000',
'1000': '1000'
}
invalid = {
'1113': ['Select a valid choice. 1113 is not one of the available choices.'],
'111': ['Select a valid choice. 111 is not one of the available choices.'],
}
self.assertFieldOutput(SIPostalCodeField, valid, invalid)
def test_SIPostalCodeSelect(self):
f = SIPostalCodeSelect()
out = '''<select name="Kranj">
<option value="8341">Adle\u0161i\u010di</option>
<option value="5270">Ajdov\u0161\u010dina</option>
<option value="6280">Ankaran - Ancarano</option>
<option value="9253">Apa\u010de</option>
<option value="8253">Arti\u010de</option>
<option value="4275">Begunje na Gorenjskem</option>
<option value="1382">Begunje pri Cerknici</option>
<option value="9231">Beltinci</option>
<option value="2234">Benedikt</option>
<option value="2345">Bistrica ob Dravi</option>
<option value="3256">Bistrica ob Sotli</option>
<option value="8259">Bizeljsko</option>
<option value="1223">Blagovica</option>
<option value="8283">Blanca</option>
<option value="4260">Bled</option>
<option value="4273">Blejska Dobrava</option>
<option value="9265">Bodonci</option>
<option value="9222">Bogojina</option>
<option value="4263">Bohinjska Bela</option>
<option value="4264">Bohinjska Bistrica</option>
<option value="4265">Bohinjsko jezero</option>
<option value="1353">Borovnica</option>
<option value="5230">Bovec</option>
<option value="8294">Bo\u0161tanj</option>
<option value="5295">Branik</option>
<option value="3314">Braslov\u010de</option>
<option value="5223">Breginj</option>
<option value="8280">Brestanica</option>
<option value="2354">Bresternica</option>
<option value="4243">Brezje</option>
<option value="1351">Brezovica pri Ljubljani</option>
<option value="8250">Bre\u017eice</option>
<option value="4210">Brnik - aerodrom</option>
<option value="8321">Brusnice</option>
<option value="3255">Bu\u010de</option>
<option value="8276">Bu\u010dka</option>
<option value="9261">Cankova</option>
<option value="3000">Celje</option>
<option value="4207">Cerklje na Gorenjskem</option>
<option value="8263">Cerklje ob Krki</option>
<option value="1380">Cerknica</option>
<option value="5282">Cerkno</option>
<option value="2236">Cerkvenjak</option>
<option value="2215">Cer\u0161ak</option>
<option value="2326">Cirkovce</option>
<option value="2282">Cirkulane</option>
<option value="5273">Col</option>
<option value="6271">Dekani</option>
<option value="5210">Deskle</option>
<option value="2253">Destrnik</option>
<option value="6215">Diva\u010da</option>
<option value="1233">Dob</option>
<option value="3224">Dobje pri Planini</option>
<option value="8257">Dobova</option>
<option value="1423">Dobovec</option>
<option value="5263">Dobravlje</option>
<option value="3204">Dobrna</option>
<option value="8211">Dobrni\u010d</option>
<option value="1356">Dobrova</option>
<option value="9223">Dobrovnik - Dobronak</option>
<option value="5212">Dobrovo v Brdih</option>
<option value="1431">Dol pri Hrastniku</option>
<option value="1262">Dol pri Ljubljani</option>
<option value="1273">Dole pri Litiji</option>
<option value="1331">Dolenja vas</option>
<option value="8350">Dolenjske Toplice</option>
<option value="1230">Dom\u017eale</option>
<option value="2252">Dornava</option>
<option value="5294">Dornberk</option>
<option value="1319">Draga</option>
<option value="8343">Dragatu\u0161</option>
<option value="3222">Dramlje</option>
<option value="2370">Dravograd</option>
<option value="4203">Duplje</option>
<option value="6221">Dutovlje</option>
<option value="8361">Dvor</option>
<option value="2343">Fala</option>
<option value="9208">Fokovci</option>
<option value="2313">Fram</option>
<option value="3213">Frankolovo</option>
<option value="1274">Gabrovka</option>
<option value="8254">Globoko</option>
<option value="5275">Godovi\u010d</option>
<option value="4204">Golnik</option>
<option value="3303">Gomilsko</option>
<option value="4224">Gorenja vas</option>
<option value="3263">Gorica pri Slivnici</option>
<option value="2272">Gori\u0161nica</option>
<option value="9250">Gornja Radgona</option>
<option value="3342">Gornji Grad</option>
<option value="4282">Gozd Martuljek</option>
<option value="9264">Grad</option>
<option value="8332">Gradac</option>
<option value="1384">Grahovo</option>
<option value="5242">Grahovo ob Ba\u010di</option>
<option value="6272">Gra\u010di\u0161\u010de</option>
<option value="5251">Grgar</option>
<option value="3302">Gri\u017ee</option>
<option value="3231">Grobelno</option>
<option value="1290">Grosuplje</option>
<option value="2288">Hajdina</option>
<option value="8362">Hinje</option>
<option value="9205">Hodo\u0161 - Hodos</option>
<option value="1354">Horjul</option>
<option value="1372">Hotedr\u0161ica</option>
<option value="2311">Ho\u010de</option>
<option value="1430">Hrastnik</option>
<option value="6225">Hru\u0161evje</option>
<option value="4276">Hru\u0161ica</option>
<option value="5280">Idrija</option>
<option value="1292">Ig</option>
<option value="6250">Ilirska Bistrica</option>
<option value="6251">Ilirska Bistrica - Trnovo</option>
<option value="2259">Ivanjkovci</option>
<option value="1295">Ivan\u010dna Gorica</option>
<option value="1411">Izlake</option>
<option value="6310">Izola - Isola</option>
<option value="2222">Jakobski Dol</option>
<option value="2221">Jarenina</option>
<option value="6254">Jel\u0161ane</option>
<option value="4270">Jesenice</option>
<option value="8261">Jesenice na Dolenjskem</option>
<option value="3273">Jurklo\u0161ter</option>
<option value="2223">Jurovski Dol</option>
<option value="2256">Jur\u0161inci</option>
<option value="5214">Kal nad Kanalom</option>
<option value="3233">Kalobje</option>
<option value="4246">Kamna Gorica</option>
<option value="2351">Kamnica</option>
<option value="1241">Kamnik</option>
<option value="5213">Kanal</option>
<option value="8258">Kapele</option>
<option value="2362">Kapla</option>
<option value="2325">Kidri\u010devo</option>
<option value="1412">Kisovec</option>
<option value="6253">Kne\u017eak</option>
<option value="5222">Kobarid</option>
<option value="9227">Kobilje</option>
<option value="2276">Kog</option>
<option value="5211">Kojsko</option>
<option value="6223">Komen</option>
<option value="1218">Komenda</option>
<option value="6000">Koper</option>
<option value="8282">Koprivnica</option>
<option value="5296">Kostanjevica na Krasu</option>
<option value="8311">Kostanjevica na Krki</option>
<option value="1336">Kostel</option>
<option value="2394">Kotlje</option>
<option value="6240">Kozina</option>
<option value="3260">Kozje</option>
<option value="1330">Ko\u010devje</option>
<option value="1338">Ko\u010devska Reka</option>
<option value="6256">Ko\u0161ana</option>
<option value="4000" selected="selected">Kranj</option>
<option value="4280">Kranjska Gora</option>
<option value="1281">Kresnice</option>
<option value="4294">Kri\u017ee</option>
<option value="9206">Kri\u017eevci</option>
<option value="9242">Kri\u017eevci pri Ljutomeru</option>
<option value="1301">Krka</option>
<option value="8296">Krmelj</option>
<option value="4245">Kropa</option>
<option value="8262">Kr\u0161ka vas</option>
<option value="8270">Kr\u0161ko</option>
<option value="9263">Kuzma</option>
<option value="2318">Laporje</option>
<option value="1219">Laze v Tuhinju</option>
<option value="3270">La\u0161ko</option>
<option value="2230">Lenart v Slovenskih goricah</option>
<option value="9220">Lendava - Lendva</option>
<option value="4248">Lesce</option>
<option value="3261">Lesi\u010dno</option>
<option value="8273">Leskovec pri Kr\u0161kem</option>
<option value="2372">Libeli\u010de</option>
<option value="2341">Limbu\u0161</option>
<option value="1270">Litija</option>
<option value="3202">Ljube\u010dna</option>
<option value="1000">Ljubljana</option>
<option value="3333">Ljubno ob Savinji</option>
<option value="9240">Ljutomer</option>
<option value="5231">Log pod Mangartom</option>
<option value="1358">Log pri Brezovici</option>
<option value="1370">Logatec</option>
<option value="1434">Loka pri Zidanem Mostu</option>
<option value="3223">Loka pri \u017dusmu</option>
<option value="6219">Lokev</option>
<option value="2324">Lovrenc na Dravskem polju</option>
<option value="2344">Lovrenc na Pohorju</option>
<option value="3215">Lo\u010de</option>
<option value="1318">Lo\u0161ki Potok</option>
<option value="1225">Lukovica</option>
<option value="3334">Lu\u010de</option>
<option value="2322">Maj\u0161perk</option>
<option value="2321">Makole</option>
<option value="9243">Mala Nedelja</option>
<option value="2229">Male\u010dnik</option>
<option value="6273">Marezige</option>
<option value="2000">Maribor</option>
<option value="2206">Marjeta na Dravskem polju</option>
<option value="2281">Markovci</option>
<option value="9221">Martjanci</option>
<option value="6242">Materija</option>
<option value="4211">Mav\u010di\u010de</option>
<option value="9202">Ma\u010dkovci</option>
<option value="1215">Medvode</option>
<option value="1234">Menge\u0161</option>
<option value="8330">Metlika</option>
<option value="2392">Me\u017eica</option>
<option value="2204">Miklav\u017e na Dravskem polju</option>
<option value="2275">Miklav\u017e pri Ormo\u017eu</option>
<option value="5291">Miren</option>
<option value="8233">Mirna</option>
<option value="8216">Mirna Pe\u010d</option>
<option value="2382">Mislinja</option>
<option value="4281">Mojstrana</option>
<option value="8230">Mokronog</option>
<option value="9226">Moravske Toplice</option>
<option value="1251">Morav\u010de</option>
<option value="5216">Most na So\u010di</option>
<option value="1221">Motnik</option>
<option value="3330">Mozirje</option>
<option value="9000">Murska Sobota</option>
<option value="2366">Muta</option>
<option value="4202">Naklo</option>
<option value="3331">Nazarje</option>
<option value="1357">Notranje Gorice</option>
<option value="3203">Nova Cerkev</option>
<option value="5000">Nova Gorica</option>
<option value="1385">Nova vas</option>
<option value="8000">Novo mesto</option>
<option value="6243">Obrov</option>
<option value="9233">Odranci</option>
<option value="2317">Oplotnica</option>
<option value="2312">Orehova vas</option>
<option value="2270">Ormo\u017e</option>
<option value="1316">Ortnek</option>
<option value="1337">Osilnica</option>
<option value="8222">Oto\u010dec</option>
<option value="2361">O\u017ebalt</option>
<option value="2231">Pernica</option>
<option value="2211">Pesnica pri Mariboru</option>
<option value="9203">Petrovci</option>
<option value="3301">Petrov\u010de</option>
<option value="6330">Piran - Pirano</option>
<option value="6257">Pivka</option>
<option value="8255">Pi\u0161ece</option>
<option value="6232">Planina</option>
<option value="3225">Planina pri Sevnici</option>
<option value="6276">Pobegi</option>
<option value="8312">Podbo\u010dje</option>
<option value="5243">Podbrdo</option>
<option value="2273">Podgorci</option>
<option value="6216">Podgorje</option>
<option value="2381">Podgorje pri Slovenj Gradcu</option>
<option value="6244">Podgrad</option>
<option value="1414">Podkum</option>
<option value="2286">Podlehnik</option>
<option value="5272">Podnanos</option>
<option value="4244">Podnart</option>
<option value="3241">Podplat</option>
<option value="3257">Podsreda</option>
<option value="2363">Podvelka</option>
<option value="3254">Pod\u010detrtek</option>
<option value="2208">Pohorje</option>
<option value="2257">Polen\u0161ak</option>
<option value="1355">Polhov Gradec</option>
<option value="4223">Poljane nad \u0160kofjo Loko</option>
<option value="2319">Polj\u010dane</option>
<option value="3313">Polzela</option>
<option value="1272">Pol\u0161nik</option>
<option value="3232">Ponikva</option>
<option value="6320">Portoro\u017e - Portorose</option>
<option value="6230">Postojna</option>
<option value="2331">Pragersko</option>
<option value="3312">Prebold</option>
<option value="4205">Preddvor</option>
<option value="6255">Prem</option>
<option value="1352">Preserje</option>
<option value="6258">Prestranek</option>
<option value="2391">Prevalje</option>
<option value="3262">Prevorje</option>
<option value="1276">Primskovo</option>
<option value="3253">Pristava pri Mestinju</option>
<option value="9207">Prosenjakovci - Partosfalva</option>
<option value="5297">Prva\u010dina</option>
<option value="2250">Ptuj</option>
<option value="2323">Ptujska Gora</option>
<option value="9201">Puconci</option>
<option value="9252">Radenci</option>
<option value="1433">Rade\u010de</option>
<option value="2360">Radlje ob Dravi</option>
<option value="1235">Radomlje</option>
<option value="4240">Radovljica</option>
<option value="8274">Raka</option>
<option value="1381">Rakek</option>
<option value="4283">Rate\u010de - Planica</option>
<option value="2390">Ravne na Koro\u0161kem</option>
<option value="2327">Ra\u010de</option>
<option value="5292">Ren\u010de</option>
<option value="3332">Re\u010dica ob Savinji</option>
<option value="1310">Ribnica</option>
<option value="2364">Ribnica na Pohorju</option>
<option value="3272">Rimske Toplice</option>
<option value="1314">Rob</option>
<option value="3252">Rogatec</option>
<option value="3250">Roga\u0161ka Slatina</option>
<option value="9262">Roga\u0161ovci</option>
<option value="1373">Rovte</option>
<option value="5215">Ro\u010dinj</option>
<option value="2342">Ru\u0161e</option>
<option value="1282">Sava</option>
<option value="4227">Selca</option>
<option value="2352">Selnica ob Dravi</option>
<option value="8333">Semi\u010d</option>
<option value="8281">Senovo</option>
<option value="6224">Seno\u017ee\u010de</option>
<option value="8290">Sevnica</option>
<option value="6333">Se\u010dovlje - Sicciole</option>
<option value="6210">Se\u017eana</option>
<option value="2214">Sladki vrh</option>
<option value="5283">Slap ob Idrijci</option>
<option value="2380">Slovenj Gradec</option>
<option value="2310">Slovenska Bistrica</option>
<option value="3210">Slovenske Konjice</option>
<option value="1216">Smlednik</option>
<option value="1317">Sodra\u017eica</option>
<option value="5250">Solkan</option>
<option value="3335">Sol\u010dava</option>
<option value="4229">Sorica</option>
<option value="4225">Sovodenj</option>
<option value="5232">So\u010da</option>
<option value="5281">Spodnja Idrija</option>
<option value="2241">Spodnji Duplek</option>
<option value="9245">Spodnji Ivanjci</option>
<option value="2277">Sredi\u0161\u010de ob Dravi</option>
<option value="4267">Srednja vas v Bohinju</option>
<option value="8256">Sromlje</option>
<option value="5224">Srpenica</option>
<option value="1242">Stahovica</option>
<option value="1332">Stara Cerkev</option>
<option value="8342">Stari trg ob Kolpi</option>
<option value="1386">Stari trg pri Lo\u017eu</option>
<option value="2205">Star\u0161e</option>
<option value="2289">Stoperce</option>
<option value="8322">Stopi\u010de</option>
<option value="3206">Stranice</option>
<option value="8351">Stra\u017ea</option>
<option value="1313">Struge</option>
<option value="8293">Studenec</option>
<option value="8331">Suhor</option>
<option value="2353">Sv. Duh na Ostrem Vrhu</option>
<option value="2233">Sveta Ana v Slovenskih goricah</option>
<option value="2235">Sveta Trojica v Slovenskih goricah</option>
<option value="9244">Sveti Jurij ob \u0160\u010davnici</option>
<option value="2258">Sveti Toma\u017e</option>
<option value="3264">Sveti \u0160tefan</option>
<option value="3304">Tabor</option>
<option value="3221">Teharje</option>
<option value="9251">Ti\u0161ina</option>
<option value="5220">Tolmin</option>
<option value="3326">Topol\u0161ica</option>
<option value="2371">Trbonje</option>
<option value="1420">Trbovlje</option>
<option value="8231">Trebelno</option>
<option value="8210">Trebnje</option>
<option value="5252">Trnovo pri Gorici</option>
<option value="2254">Trnovska vas</option>
<option value="1222">Trojane</option>
<option value="1236">Trzin</option>
<option value="4290">Tr\u017ei\u010d</option>
<option value="8295">Tr\u017ei\u0161\u010de</option>
<option value="1311">Turjak</option>
<option value="9224">Turni\u0161\u010de</option>
<option value="8323">Ur\u0161na sela</option>
<option value="1252">Va\u010de</option>
<option value="3320">Velenje - dostava</option>
<option value="3322">Velenje - po\u0161tni predali</option>
<option value="8212">Velika Loka</option>
<option value="2274">Velika Nedelja</option>
<option value="9225">Velika Polana</option>
<option value="1315">Velike La\u0161\u010de</option>
<option value="8213">Veliki Gaber</option>
<option value="9241">Ver\u017eej</option>
<option value="1312">Videm - Dobrepolje</option>
<option value="2284">Videm pri Ptuju</option>
<option value="8344">Vinica pri \u010crnomlju</option>
<option value="5271">Vipava</option>
<option value="4212">Visoko</option>
<option value="3205">Vitanje</option>
<option value="2255">Vitomarci</option>
<option value="1294">Vi\u0161nja Gora</option>
<option value="1217">Vodice</option>
<option value="3212">Vojnik</option>
<option value="2232">Voli\u010dina</option>
<option value="5293">Vol\u010dja Draga</option>
<option value="3305">Vransko</option>
<option value="6217">Vremski Britof</option>
<option value="1360">Vrhnika</option>
<option value="2365">Vuhred</option>
<option value="2367">Vuzenica</option>
<option value="8292">Zabukovje</option>
<option value="1410">Zagorje ob Savi</option>
<option value="1303">Zagradec</option>
<option value="2283">Zavr\u010d</option>
<option value="8272">Zdole</option>
<option value="4201">Zgornja Besnica</option>
<option value="2242">Zgornja Korena</option>
<option value="2201">Zgornja Kungota</option>
<option value="2316">Zgornja Lo\u017enica</option>
<option value="2314">Zgornja Polskava</option>
<option value="2213">Zgornja Velka</option>
<option value="4247">Zgornje Gorje</option>
<option value="4206">Zgornje Jezersko</option>
<option value="2285">Zgornji Leskovec</option>
<option value="1432">Zidani Most</option>
<option value="3214">Zre\u010de</option>
<option value="8251">\u010cate\u017e ob Savi</option>
<option value="1413">\u010cem\u0161enik</option>
<option value="5253">\u010cepovan</option>
<option value="9232">\u010cren\u0161ovci</option>
<option value="2393">\u010crna na Koro\u0161kem</option>
<option value="6275">\u010crni Kal</option>
<option value="5274">\u010crni Vrh nad Idrijo</option>
<option value="5262">\u010crni\u010de</option>
<option value="8340">\u010crnomelj</option>
<option value="9204">\u0160alovci</option>
<option value="5261">\u0160empas</option>
<option value="5290">\u0160empeter pri Gorici</option>
<option value="3311">\u0160empeter v Savinjski dolini</option>
<option value="2212">\u0160entilj v Slovenskih goricah</option>
<option value="8297">\u0160entjan\u017e</option>
<option value="2373">\u0160entjan\u017e pri Dravogradu</option>
<option value="8310">\u0160entjernej</option>
<option value="3230">\u0160entjur</option>
<option value="3271">\u0160entrupert</option>
<option value="8232">\u0160entrupert</option>
<option value="1296">\u0160entvid pri Sti\u010dni</option>
<option value="4208">\u0160en\u010dur</option>
<option value="8275">\u0160kocjan</option>
<option value="6281">\u0160kofije</option>
<option value="4220">\u0160kofja Loka</option>
<option value="3211">\u0160kofja vas</option>
<option value="1291">\u0160kofljica</option>
<option value="6274">\u0160marje</option>
<option value="1293">\u0160marje - Sap</option>
<option value="3240">\u0160marje pri Jel\u0161ah</option>
<option value="8220">\u0160marje\u0161ke Toplice</option>
<option value="2315">\u0160martno na Pohorju</option>
<option value="3341">\u0160martno ob Dreti</option>
<option value="3327">\u0160martno ob Paki</option>
<option value="1275">\u0160martno pri Litiji</option>
<option value="2383">\u0160martno pri Slovenj Gradcu</option>
<option value="3201">\u0160martno v Ro\u017eni dolini</option>
<option value="3325">\u0160o\u0161tanj</option>
<option value="6222">\u0160tanjel</option>
<option value="3220">\u0160tore</option>
<option value="4209">\u017dabnica</option>
<option value="3310">\u017dalec</option>
<option value="4228">\u017delezniki</option>
<option value="2287">\u017detale</option>
<option value="4226">\u017diri</option>
<option value="4274">\u017dirovnica</option>
<option value="8360">\u017du\u017eemberk</option>
</select>'''
self.assertHTMLEqual(f.render('Kranj', '4000'), out)
|
seem-sky/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/test/test_grammar.py
|
68
|
# Python test set -- part 1, grammar.
# This just tests whether the parser accepts them all.
from test.support import run_unittest, check_syntax_error
import unittest
import sys
# testing import *
from sys import *
class TokenTests(unittest.TestCase):
def test_backslash(self):
# Backslash means line continuation:
x = 1 \
+ 1
self.assertEqual(x, 2, 'backslash for line continuation')
        # Backslash does not mean continuation in comments :\
x = 0
self.assertEqual(x, 0, 'backslash ending comment')
def test_plain_integers(self):
self.assertEqual(type(000), type(0))
self.assertEqual(0xff, 255)
self.assertEqual(0o377, 255)
self.assertEqual(2147483647, 0o17777777777)
self.assertEqual(0b1001, 9)
# "0x" is not a valid literal
self.assertRaises(SyntaxError, eval, "0x")
from sys import maxsize
if maxsize == 2147483647:
self.assertEqual(-2147483647-1, -0o20000000000)
# XXX -2147483648
self.assertTrue(0o37777777777 > 0)
self.assertTrue(0xffffffff > 0)
self.assertTrue(0b1111111111111111111111111111111 > 0)
for s in ('2147483648', '0o40000000000', '0x100000000',
'0b10000000000000000000000000000000'):
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
elif maxsize == 9223372036854775807:
self.assertEqual(-9223372036854775807-1, -0o1000000000000000000000)
self.assertTrue(0o1777777777777777777777 > 0)
self.assertTrue(0xffffffffffffffff > 0)
self.assertTrue(0b11111111111111111111111111111111111111111111111111111111111111 > 0)
for s in '9223372036854775808', '0o2000000000000000000000', \
'0x10000000000000000', \
'0b100000000000000000000000000000000000000000000000000000000000000':
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
else:
self.fail('Weird maxsize value %r' % maxsize)
def test_long_integers(self):
x = 0
x = 0xffffffffffffffff
x = 0Xffffffffffffffff
x = 0o77777777777777777
x = 0O77777777777777777
x = 123456789012345678901234567890
x = 0b100000000000000000000000000000000000000000000000000000000000000000000
x = 0B111111111111111111111111111111111111111111111111111111111111111111111
def test_floats(self):
x = 3.14
x = 314.
x = 0.314
# XXX x = 000.314
x = .314
x = 3e14
x = 3E14
x = 3e-14
x = 3e+14
x = 3.e14
x = .3e14
x = 3.1e4
def test_float_exponent_tokenization(self):
# See issue 21642.
self.assertEqual(1 if 1else 0, 1)
self.assertEqual(1 if 0else 0, 0)
self.assertRaises(SyntaxError, eval, "0 if 1Else 0")
def test_string_literals(self):
x = ''; y = ""; self.assertTrue(len(x) == 0 and x == y)
x = '\''; y = "'"; self.assertTrue(len(x) == 1 and x == y and ord(x) == 39)
x = '"'; y = "\""; self.assertTrue(len(x) == 1 and x == y and ord(x) == 34)
x = "doesn't \"shrink\" does it"
y = 'doesn\'t "shrink" does it'
self.assertTrue(len(x) == 24 and x == y)
x = "does \"shrink\" doesn't it"
y = 'does "shrink" doesn\'t it'
self.assertTrue(len(x) == 24 and x == y)
x = """
The "quick"
brown fox
jumps over
the 'lazy' dog.
"""
y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
self.assertEqual(x, y)
y = '''
The "quick"
brown fox
jumps over
the 'lazy' dog.
'''
self.assertEqual(x, y)
y = "\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the 'lazy' dog.\n\
"
self.assertEqual(x, y)
y = '\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the \'lazy\' dog.\n\
'
self.assertEqual(x, y)
def test_ellipsis(self):
x = ...
self.assertTrue(x is Ellipsis)
self.assertRaises(SyntaxError, eval, ".. .")
def test_eof_error(self):
samples = ("def foo(", "\ndef foo(", "def foo(\n")
for s in samples:
with self.assertRaises(SyntaxError) as cm:
compile(s, "<test>", "exec")
self.assertIn("unexpected EOF", str(cm.exception))
class GrammarTests(unittest.TestCase):
# single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
# XXX can't test in a script -- this rule is only used when interactive
# file_input: (NEWLINE | stmt)* ENDMARKER
# Being tested as this very moment this very module
# expr_input: testlist NEWLINE
# XXX Hard to test -- used only in calls to input()
def test_eval_input(self):
# testlist ENDMARKER
x = eval('1, 0 or 1')
def test_funcdef(self):
### [decorators] 'def' NAME parameters ['->' test] ':' suite
### decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
### decorators: decorator+
### parameters: '(' [typedargslist] ')'
### typedargslist: ((tfpdef ['=' test] ',')*
### ('*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef)
### | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
### tfpdef: NAME [':' test]
### varargslist: ((vfpdef ['=' test] ',')*
### ('*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef)
### | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
### vfpdef: NAME
def f1(): pass
f1()
f1(*())
f1(*(), **{})
def f2(one_argument): pass
def f3(two, arguments): pass
self.assertEqual(f2.__code__.co_varnames, ('one_argument',))
self.assertEqual(f3.__code__.co_varnames, ('two', 'arguments'))
def a1(one_arg,): pass
def a2(two, args,): pass
def v0(*rest): pass
def v1(a, *rest): pass
def v2(a, b, *rest): pass
f1()
f2(1)
f2(1,)
f3(1, 2)
f3(1, 2,)
v0()
v0(1)
v0(1,)
v0(1,2)
v0(1,2,3,4,5,6,7,8,9,0)
v1(1)
v1(1,)
v1(1,2)
v1(1,2,3)
v1(1,2,3,4,5,6,7,8,9,0)
v2(1,2)
v2(1,2,3)
v2(1,2,3,4)
v2(1,2,3,4,5,6,7,8,9,0)
def d01(a=1): pass
d01()
d01(1)
d01(*(1,))
d01(**{'a':2})
def d11(a, b=1): pass
d11(1)
d11(1, 2)
d11(1, **{'b':2})
def d21(a, b, c=1): pass
d21(1, 2)
d21(1, 2, 3)
d21(*(1, 2, 3))
d21(1, *(2, 3))
d21(1, 2, *(3,))
d21(1, 2, **{'c':3})
def d02(a=1, b=2): pass
d02()
d02(1)
d02(1, 2)
d02(*(1, 2))
d02(1, *(2,))
d02(1, **{'b':2})
d02(**{'a': 1, 'b': 2})
def d12(a, b=1, c=2): pass
d12(1)
d12(1, 2)
d12(1, 2, 3)
def d22(a, b, c=1, d=2): pass
d22(1, 2)
d22(1, 2, 3)
d22(1, 2, 3, 4)
def d01v(a=1, *rest): pass
d01v()
d01v(1)
d01v(1, 2)
d01v(*(1, 2, 3, 4))
d01v(*(1,))
d01v(**{'a':2})
def d11v(a, b=1, *rest): pass
d11v(1)
d11v(1, 2)
d11v(1, 2, 3)
def d21v(a, b, c=1, *rest): pass
d21v(1, 2)
d21v(1, 2, 3)
d21v(1, 2, 3, 4)
d21v(*(1, 2, 3, 4))
d21v(1, 2, **{'c': 3})
def d02v(a=1, b=2, *rest): pass
d02v()
d02v(1)
d02v(1, 2)
d02v(1, 2, 3)
d02v(1, *(2, 3, 4))
d02v(**{'a': 1, 'b': 2})
def d12v(a, b=1, c=2, *rest): pass
d12v(1)
d12v(1, 2)
d12v(1, 2, 3)
d12v(1, 2, 3, 4)
d12v(*(1, 2, 3, 4))
d12v(1, 2, *(3, 4, 5))
d12v(1, *(2,), **{'c': 3})
def d22v(a, b, c=1, d=2, *rest): pass
d22v(1, 2)
d22v(1, 2, 3)
d22v(1, 2, 3, 4)
d22v(1, 2, 3, 4, 5)
d22v(*(1, 2, 3, 4))
d22v(1, 2, *(3, 4, 5))
d22v(1, *(2, 3), **{'d': 4})
# keyword argument type tests
try:
str('x', **{b'foo':1 })
except TypeError:
pass
else:
self.fail('Bytes should not work as keyword argument names')
# keyword only argument tests
def pos0key1(*, key): return key
pos0key1(key=100)
def pos2key2(p1, p2, *, k1, k2=100): return p1,p2,k1,k2
pos2key2(1, 2, k1=100)
pos2key2(1, 2, k1=100, k2=200)
pos2key2(1, 2, k2=100, k1=200)
def pos2key2dict(p1, p2, *, k1=100, k2, **kwarg): return p1,p2,k1,k2,kwarg
pos2key2dict(1,2,k2=100,tokwarg1=100,tokwarg2=200)
pos2key2dict(1,2,tokwarg1=100,tokwarg2=200, k2=100)
# keyword arguments after *arglist
def f(*args, **kwargs):
return args, kwargs
self.assertEqual(f(1, x=2, *[3, 4], y=5), ((1, 3, 4),
{'x':2, 'y':5}))
self.assertRaises(SyntaxError, eval, "f(1, *(2,3), 4)")
self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)")
# argument annotation tests
def f(x) -> list: pass
self.assertEqual(f.__annotations__, {'return': list})
def f(x:int): pass
self.assertEqual(f.__annotations__, {'x': int})
def f(*x:str): pass
self.assertEqual(f.__annotations__, {'x': str})
def f(**x:float): pass
self.assertEqual(f.__annotations__, {'x': float})
def f(x, y:1+2): pass
self.assertEqual(f.__annotations__, {'y': 3})
def f(a, b:1, c:2, d): pass
self.assertEqual(f.__annotations__, {'b': 1, 'c': 2})
def f(a, b:1, c:2, d, e:3=4, f=5, *g:6): pass
self.assertEqual(f.__annotations__,
{'b': 1, 'c': 2, 'e': 3, 'g': 6})
def f(a, b:1, c:2, d, e:3=4, f=5, *g:6, h:7, i=8, j:9=10,
**k:11) -> 12: pass
self.assertEqual(f.__annotations__,
{'b': 1, 'c': 2, 'e': 3, 'g': 6, 'h': 7, 'j': 9,
'k': 11, 'return': 12})
# Check for issue #20625 -- annotations mangling
class Spam:
def f(self, *, __kw:1):
pass
class Ham(Spam): pass
self.assertEqual(Spam.f.__annotations__, {'_Spam__kw': 1})
self.assertEqual(Ham.f.__annotations__, {'_Spam__kw': 1})
# Check for SF Bug #1697248 - mixing decorators and a return annotation
def null(x): return x
@null
def f(x) -> list: pass
self.assertEqual(f.__annotations__, {'return': list})
# test MAKE_CLOSURE with a variety of oparg's
closure = 1
def f(): return closure
def f(x=1): return closure
def f(*, k=1): return closure
def f() -> int: return closure
# Check ast errors in *args and *kwargs
check_syntax_error(self, "f(*g(1=2))")
check_syntax_error(self, "f(**g(1=2))")
def test_lambdef(self):
### lambdef: 'lambda' [varargslist] ':' test
l1 = lambda : 0
self.assertEqual(l1(), 0)
l2 = lambda : a[d] # XXX just testing the expression
l3 = lambda : [2 < x for x in [-1, 3, 0]]
self.assertEqual(l3(), [0, 1, 0])
l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
self.assertEqual(l4(), 1)
l5 = lambda x, y, z=2: x + y + z
self.assertEqual(l5(1, 2), 5)
self.assertEqual(l5(1, 2, 3), 6)
check_syntax_error(self, "lambda x: x = 2")
check_syntax_error(self, "lambda (None,): None")
l6 = lambda x, y, *, k=20: x+y+k
self.assertEqual(l6(1,2), 1+2+20)
self.assertEqual(l6(1,2,k=10), 1+2+10)
### stmt: simple_stmt | compound_stmt
# Tested below
def test_simple_stmt(self):
### simple_stmt: small_stmt (';' small_stmt)* [';']
x = 1; pass; del x
def foo():
# verify statements that end with semi-colons
x = 1; pass; del x;
foo()
### small_stmt: expr_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt
# Tested below
def test_expr_stmt(self):
# (exprlist '=')* exprlist
1
1, 2, 3
x = 1
x = 1, 2, 3
x = y = z = 1, 2, 3
x, y, z = 1, 2, 3
abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
check_syntax_error(self, "x + 1 = 1")
check_syntax_error(self, "a + 1 = b + 2")
# Check the heuristic for print & exec covers significant cases
# As well as placing some limits on false positives
def test_former_statements_refer_to_builtins(self):
keywords = "print", "exec"
# Cases where we want the custom error
cases = [
"{} foo",
"{} {{1:foo}}",
"if 1: {} foo",
"if 1: {} {{1:foo}}",
"if 1:\n {} foo",
"if 1:\n {} {{1:foo}}",
]
for keyword in keywords:
custom_msg = "call to '{}'".format(keyword)
for case in cases:
source = case.format(keyword)
with self.subTest(source=source):
with self.assertRaisesRegex(SyntaxError, custom_msg):
exec(source)
source = source.replace("foo", "(foo.)")
with self.subTest(source=source):
with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
exec(source)
def test_del_stmt(self):
# 'del' exprlist
abc = [1,2,3]
x, y, z = abc
xyz = x, y, z
del abc
del x, y, (z, xyz)
def test_pass_stmt(self):
# 'pass'
pass
# flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
# Tested below
def test_break_stmt(self):
# 'break'
while 1: break
def test_continue_stmt(self):
# 'continue'
i = 1
while i: i = 0; continue
msg = ""
while not msg:
msg = "ok"
try:
continue
msg = "continue failed to continue inside try"
except:
msg = "continue inside try called except block"
if msg != "ok":
self.fail(msg)
msg = ""
while not msg:
msg = "finally block not called"
try:
continue
finally:
msg = "ok"
if msg != "ok":
self.fail(msg)
def test_break_continue_loop(self):
# This test warrants an explanation. It is a test specifically for SF bugs
# #463359 and #462937. The bug is that a 'break' statement executed or
# exception raised inside a try/except inside a loop, *after* a continue
# statement has been executed in that loop, will cause the wrong number of
# arguments to be popped off the stack and the instruction pointer reset to
# a very small number (usually 0.) Because of this, the following test
        # *must* be written as a function, and the tracking vars *must* be function
# arguments with default values. Otherwise, the test will loop and loop.
def test_inner(extra_burning_oil = 1, count=0):
big_hippo = 2
while big_hippo:
count += 1
try:
if extra_burning_oil and big_hippo == 1:
extra_burning_oil -= 1
break
big_hippo -= 1
continue
except:
raise
if count > 2 or big_hippo != 1:
self.fail("continue then break in try/except in loop broken!")
test_inner()
def test_return(self):
# 'return' [testlist]
def g1(): return
def g2(): return 1
g1()
x = g2()
check_syntax_error(self, "class foo:return 1")
def test_yield(self):
# Allowed as standalone statement
def g(): yield 1
def g(): yield from ()
# Allowed as RHS of assignment
def g(): x = yield 1
def g(): x = yield from ()
# Ordinary yield accepts implicit tuples
def g(): yield 1, 1
def g(): x = yield 1, 1
# 'yield from' does not
check_syntax_error(self, "def g(): yield from (), 1")
check_syntax_error(self, "def g(): x = yield from (), 1")
# Requires parentheses as subexpression
def g(): 1, (yield 1)
def g(): 1, (yield from ())
check_syntax_error(self, "def g(): 1, yield 1")
check_syntax_error(self, "def g(): 1, yield from ()")
# Requires parentheses as call argument
def g(): f((yield 1))
def g(): f((yield 1), 1)
def g(): f((yield from ()))
def g(): f((yield from ()), 1)
check_syntax_error(self, "def g(): f(yield 1)")
check_syntax_error(self, "def g(): f(yield 1, 1)")
check_syntax_error(self, "def g(): f(yield from ())")
check_syntax_error(self, "def g(): f(yield from (), 1)")
# Not allowed at top level
check_syntax_error(self, "yield")
check_syntax_error(self, "yield from")
# Not allowed at class scope
check_syntax_error(self, "class foo:yield 1")
check_syntax_error(self, "class foo:yield from ()")
def test_raise(self):
# 'raise' test [',' test]
try: raise RuntimeError('just testing')
except RuntimeError: pass
try: raise KeyboardInterrupt
except KeyboardInterrupt: pass
def test_import(self):
# 'import' dotted_as_names
import sys
import time, sys
# 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names)
from time import time
from time import (time)
# not testable inside a function, but already done at top of the module
# from sys import *
from sys import path, argv
from sys import (path, argv)
from sys import (path, argv,)
def test_global(self):
# 'global' NAME (',' NAME)*
global a
global a, b
global one, two, three, four, five, six, seven, eight, nine, ten
def test_nonlocal(self):
# 'nonlocal' NAME (',' NAME)*
x = 0
y = 0
def f():
nonlocal x
nonlocal x, y
def test_assert(self):
# assertTruestmt: 'assert' test [',' test]
assert 1
assert 1, 1
assert lambda x:x
assert 1, lambda x:x+1
try:
assert True
except AssertionError as e:
self.fail("'assert True' should not have raised an AssertionError")
try:
assert True, 'this should always pass'
except AssertionError as e:
self.fail("'assert True, msg' should not have "
"raised an AssertionError")
# these tests fail if python is run with -O, so check __debug__
@unittest.skipUnless(__debug__, "Won't work if __debug__ is False")
def testAssert2(self):
try:
assert 0, "msg"
except AssertionError as e:
self.assertEqual(e.args[0], "msg")
else:
self.fail("AssertionError not raised by assert 0")
try:
assert False
except AssertionError as e:
self.assertEqual(len(e.args), 0)
else:
self.fail("AssertionError not raised by 'assert False'")
### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
# Tested below
def test_if(self):
# 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
if 1: pass
if 1: pass
else: pass
if 0: pass
elif 0: pass
if 0: pass
elif 0: pass
elif 0: pass
elif 0: pass
else: pass
def test_while(self):
# 'while' test ':' suite ['else' ':' suite]
while 0: pass
while 0: pass
else: pass
# Issue1920: "while 0" is optimized away,
# ensure that the "else" clause is still present.
x = 0
while 0:
x = 1
else:
x = 2
self.assertEqual(x, 2)
def test_for(self):
# 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
for i in 1, 2, 3: pass
for i, j, k in (): pass
else: pass
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self): return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n = n+1
return self.sofar[i]
n = 0
for x in Squares(10): n = n+x
if n != 285:
self.fail('for over growing sequence')
result = []
for x, in [(1,), (2,), (3,)]:
result.append(x)
self.assertEqual(result, [1, 2, 3])
def test_try(self):
### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
### | 'try' ':' suite 'finally' ':' suite
### except_clause: 'except' [expr ['as' expr]]
try:
1/0
except ZeroDivisionError:
pass
else:
pass
try: 1/0
except EOFError: pass
except TypeError as msg: pass
except RuntimeError as msg: pass
except: pass
else: pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError): pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError) as msg: pass
try: pass
finally: pass
def test_suite(self):
# simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
if 1: pass
if 1:
pass
if 1:
#
#
#
pass
pass
#
pass
#
def test_test(self):
### and_test ('or' and_test)*
### and_test: not_test ('and' not_test)*
### not_test: 'not' not_test | comparison
if not 1: pass
if 1 and 1: pass
if 1 or 1: pass
if not not not 1: pass
if not 1 and 1 and 1: pass
if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
def test_comparison(self):
### comparison: expr (comp_op expr)*
### comp_op: '<'|'>'|'=='|'>='|'<='|'!='|'in'|'not' 'in'|'is'|'is' 'not'
if 1: pass
x = (1 == 1)
if 1 == 1: pass
if 1 != 1: pass
if 1 < 1: pass
if 1 > 1: pass
if 1 <= 1: pass
if 1 >= 1: pass
if 1 is 1: pass
if 1 is not 1: pass
if 1 in (): pass
if 1 not in (): pass
if 1 < 1 > 1 == 1 >= 1 <= 1 != 1 in 1 not in 1 is 1 is not 1: pass
def test_binary_mask_ops(self):
x = 1 & 1
x = 1 ^ 1
x = 1 | 1
def test_shift_ops(self):
x = 1 << 1
x = 1 >> 1
x = 1 << 1 >> 1
def test_additive_ops(self):
x = 1
x = 1 + 1
x = 1 - 1 - 1
x = 1 - 1 + 1 - 1 + 1
def test_multiplicative_ops(self):
x = 1 * 1
x = 1 / 1
x = 1 % 1
x = 1 / 1 * 1 % 1
def test_unary_ops(self):
x = +1
x = -1
x = ~1
x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
x = -1*1/1 + 1*1 - ---1*1
def test_selectors(self):
### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
### subscript: expr | [expr] ':' [expr]
import sys, time
c = sys.path[0]
x = time.time()
x = sys.modules['time'].time()
a = '01234'
c = a[0]
c = a[-1]
s = a[0:5]
s = a[:5]
s = a[0:]
s = a[:]
s = a[-5:]
s = a[:-1]
s = a[-4:-3]
# A rough test of SF bug 1333982. http://python.org/sf/1333982
# The testing here is fairly incomplete.
# Test cases should include: commas with 1 and 2 colons
d = {}
d[1] = 1
d[1,] = 2
d[1,2] = 3
d[1,2,3] = 4
L = list(d)
L.sort(key=lambda x: x if isinstance(x, tuple) else ())
self.assertEqual(str(L), '[1, (1,), (1, 2), (1, 2, 3)]')
def test_atoms(self):
### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictsetmaker] '}' | NAME | NUMBER | STRING
### dictsetmaker: (test ':' test (',' test ':' test)* [',']) | (test (',' test)* [','])
x = (1)
x = (1 or 2 or 3)
x = (1 or 2 or 3, 2, 3)
x = []
x = [1]
x = [1 or 2 or 3]
x = [1 or 2 or 3, 2, 3]
x = []
x = {}
x = {'one': 1}
x = {'one': 1,}
x = {'one' or 'two': 1 or 2}
x = {'one': 1, 'two': 2}
x = {'one': 1, 'two': 2,}
x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
x = {'one'}
x = {'one', 1,}
x = {'one', 'two', 'three'}
x = {2, 3, 4,}
x = x
x = 'x'
x = 123
### exprlist: expr (',' expr)* [',']
### testlist: test (',' test)* [',']
# These have been exercised enough above
def test_classdef(self):
# 'class' NAME ['(' [testlist] ')'] ':' suite
class B: pass
class B2(): pass
class C1(B): pass
class C2(B): pass
class D(C1, C2, B): pass
class C:
def meth1(self): pass
def meth2(self, arg): pass
def meth3(self, a1, a2): pass
# decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
# decorators: decorator+
# decorated: decorators (classdef | funcdef)
def class_decorator(x): return x
@class_decorator
class G: pass
def test_dictcomps(self):
# dictorsetmaker: ( (test ':' test (comp_for |
# (',' test ':' test)* [','])) |
# (test (comp_for | (',' test)* [','])) )
nums = [1, 2, 3]
self.assertEqual({i:i+1 for i in nums}, {1: 2, 2: 3, 3: 4})
def test_listcomps(self):
# list comprehension tests
nums = [1, 2, 3, 4, 5]
strs = ["Apple", "Banana", "Coconut"]
spcs = [" Apple", " Banana ", "Coco nut "]
self.assertEqual([s.strip() for s in spcs], ['Apple', 'Banana', 'Coco nut'])
self.assertEqual([3 * x for x in nums], [3, 6, 9, 12, 15])
self.assertEqual([x for x in nums if x > 2], [3, 4, 5])
self.assertEqual([(i, s) for i in nums for s in strs],
[(1, 'Apple'), (1, 'Banana'), (1, 'Coconut'),
(2, 'Apple'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Apple'), (3, 'Banana'), (3, 'Coconut'),
(4, 'Apple'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Apple'), (5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(i, s) for i in nums for s in [f for f in strs if "n" in f]],
[(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)],
[[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]])
def test_in_func(l):
return [0 < x < 3 for x in l if x > 2]
self.assertEqual(test_in_func(nums), [False, False, False])
def test_nested_front():
self.assertEqual([[y for y in [x, x + 1]] for x in [1,3,5]],
[[1, 2], [3, 4], [5, 6]])
test_nested_front()
check_syntax_error(self, "[i, s for i in nums for s in strs]")
check_syntax_error(self, "[x if y]")
suppliers = [
(1, "Boeing"),
(2, "Ford"),
(3, "Macdonalds")
]
parts = [
(10, "Airliner"),
(20, "Engine"),
(30, "Cheeseburger")
]
suppart = [
(1, 10), (1, 20), (2, 20), (3, 30)
]
x = [
(sname, pname)
for (sno, sname) in suppliers
for (pno, pname) in parts
for (sp_sno, sp_pno) in suppart
if sno == sp_sno and pno == sp_pno
]
self.assertEqual(x, [('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'),
('Macdonalds', 'Cheeseburger')])
def test_genexps(self):
# generator expression tests
g = ([x for x in range(10)] for x in range(1))
self.assertEqual(next(g), [x for x in range(10)])
try:
next(g)
self.fail('should produce StopIteration exception')
except StopIteration:
pass
a = 1
try:
g = (a for d in a)
next(g)
self.fail('should produce TypeError')
except TypeError:
pass
self.assertEqual(list((x, y) for x in 'abcd' for y in 'abcd'), [(x, y) for x in 'abcd' for y in 'abcd'])
self.assertEqual(list((x, y) for x in 'ab' for y in 'xy'), [(x, y) for x in 'ab' for y in 'xy'])
a = [x for x in range(10)]
b = (x for x in (y for y in a))
self.assertEqual(sum(b), sum([x for x in range(10)]))
self.assertEqual(sum(x**2 for x in range(10)), sum([x**2 for x in range(10)]))
self.assertEqual(sum(x*x for x in range(10) if x%2), sum([x*x for x in range(10) if x%2]))
self.assertEqual(sum(x for x in (y for y in range(10))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10)))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in [y for y in (z for z in range(10))]), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True)) if True), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True), 0)
check_syntax_error(self, "foo(x for x in range(10), 100)")
check_syntax_error(self, "foo(100, x for x in range(10))")
def test_comprehension_specials(self):
        # test for outermost iterable precomputation
x = 10; g = (i for i in range(x)); x = 5
self.assertEqual(len(list(g)), 10)
        # This should hold, since we're only precomputing the outermost iterable.
x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
x = 5; t = True;
self.assertEqual([(i,j) for i in range(10) for j in range(5)], list(g))
# Grammar allows multiple adjacent 'if's in listcomps and genexps,
# even though it's silly. Make sure it works (ifelse broke this.)
self.assertEqual([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7])
self.assertEqual(list(x for x in range(10) if x % 2 if x % 3), [1, 5, 7])
# verify unpacking single element tuples in listcomp/genexp.
self.assertEqual([x for x, in [(4,), (5,), (6,)]], [4, 5, 6])
self.assertEqual(list(x for x, in [(7,), (8,), (9,)]), [7, 8, 9])
def test_with_statement(self):
class manager(object):
def __enter__(self):
return (1, 2)
def __exit__(self, *args):
pass
with manager():
pass
with manager() as x:
pass
with manager() as (x, y):
pass
with manager(), manager():
pass
with manager() as x, manager() as y:
pass
with manager() as x, manager():
pass
def test_if_else_expr(self):
# Test ifelse expressions in various cases
def _checkeval(msg, ret):
"helper to check that evaluation of expressions is done correctly"
print(x)
return ret
# the next line is not allowed anymore
#self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True])
self.assertEqual([ x() for x in (lambda: True, lambda: False) if x() ], [True])
self.assertEqual([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ], [True])
self.assertEqual((5 if 1 else _checkeval("check 1", 0)), 5)
self.assertEqual((_checkeval("check 2", 0) if 0 else 5), 5)
self.assertEqual((5 and 6 if 0 else 1), 1)
self.assertEqual(((5 and 6) if 0 else 1), 1)
self.assertEqual((5 and (6 if 1 else 1)), 6)
self.assertEqual((0 or _checkeval("check 3", 2) if 0 else 3), 3)
self.assertEqual((1 or _checkeval("check 4", 2) if 1 else _checkeval("check 5", 3)), 1)
self.assertEqual((0 or 5 if 1 else _checkeval("check 6", 3)), 5)
self.assertEqual((not 5 if 1 else 1), False)
self.assertEqual((not 5 if 0 else 1), 1)
self.assertEqual((6 + 1 if 1 else 2), 7)
self.assertEqual((6 - 1 if 1 else 2), 5)
self.assertEqual((6 * 2 if 1 else 4), 12)
self.assertEqual((6 / 2 if 1 else 3), 3)
self.assertEqual((6 < 4 if 0 else 2), 2)
def test_paren_evaluation(self):
self.assertEqual(16 // (4 // 2), 8)
self.assertEqual((16 // 4) // 2, 2)
self.assertEqual(16 // 4 // 2, 2)
self.assertTrue(False is (2 is 3))
self.assertFalse((False is 2) is 3)
self.assertFalse(False is 2 is 3)
def test_main():
run_unittest(TokenTests, GrammarTests)
if __name__ == '__main__':
test_main()
|
herilalaina/scikit-learn
|
refs/heads/master
|
examples/manifold/plot_mds.py
|
88
|
"""
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The points reconstructed by the metric MDS and the non-metric MDS are slightly
shifted apart to avoid overlapping.
"""
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# License: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
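# Hedged aside: both fitted estimators expose the final value of the stress
# objective via their `stress_` attribute, a quick check that the embeddings
# converged.
print("Metric MDS stress: %.4f, non-metric MDS stress: %.4f"
      % (mds.stress_, nmds.stress_))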
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
s = 100
plt.scatter(X_true[:, 0], X_true[:, 1], color='navy', s=s, lw=0,
label='True Position')
plt.scatter(pos[:, 0], pos[:, 1], color='turquoise', s=s, lw=0, label='MDS')
plt.scatter(npos[:, 0], npos[:, 1], color='darkorange', s=s, lw=0, label='NMDS')
plt.legend(scatterpoints=1, loc='best', shadow=False)
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.Blues,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
|
BorgERP/borg-erp-6of3
|
refs/heads/master
|
l10n_hr/l10n_hr_account/__openerp__.py
|
1
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Module: l10n_hr_account
# Author: Goran Kliska
# mail: gkliskaATgmail.com
# Copyright (C) 2011- Slobodni programi d.o.o., Zagreb
# http://www.slobodni-programi.hr
# Contributions:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Croatian localization - taxes",
"description" : """
Croatian localisation.
======================
Author: Goran Kliska @ Slobodni programi d.o.o.
The fiscal position change feature is borrowed from the account_invoice_change_fiscal_position module.
Thanks to Leonardo Pistone <leonardo.pistone@domsense.com>
Contributions:
.
Description:
Payment model and reference number ("poziv na broj") on outgoing invoices,
Delivery date on outgoing invoices,
Fiscal position change on invoice lines.
TODO: printing of these data on the invoice, possibly in a separate module.
""",
"version" : "0.3",
"author" : "Slobodni programi d.o.o.",
"category" : "Localisation/Croatia",
"website": "http://www.slobodni-programi.hr",
'depends': [
'account',
],
'init_xml': [],
'update_xml': [
'account_view.xml',
'account_invoice_view.xml',
],
"demo_xml" : [],
'test' : [],
"active": False,
"installable": True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
s-t-e-a-l-t-h/Eclipsing-binaries-library
|
refs/heads/master
|
objects/Observer.py
|
1
|
#!/usr/bin/python
# import numpy as np
import pymysql
pymysql.install_as_MySQLdb()
import MySQLdb
import objects.Function as Fn
import numpy as np
# import math as m
import sys
import globe.variables as gv
import os
import objects.Lightcurve as Lc
import objects.Iostream as Io
from objects.Timer import Timer
class Observer:
def __init__(
self,
passband=None,
limb_darkening_model=None,
        observe=None,  # object to observe, e.g. a binary star system created as a Binary class instance
limb_darkening_interp_method="nearest",
verbose=False
):
# <default>
self.exception = []
self.passband_model = None
self.passband_range = None
self.init = True
self.passband_list = ['bolometric', 'Generic/Bessell.U', 'Generic/Bessell.B', 'Generic/Bessell.V',
'Generic/Bessell.R', 'Generic/Bessell.I', 'SLOAN/SDSS.u',
'SLOAN/SDSS.g', 'SLOAN/SDSS.r', 'SLOAN/SDSS.i', 'SLOAN/SDSS.z', 'Generic/Stromgren.u',
'Generic/Stromgren.v', 'Generic/Stromgren.b',
'Generic/Stromgren.y']
self.limb_darkening_interp_method = limb_darkening_interp_method
# < /default>
self.verbose = verbose
        self.limb_darkening_model = limb_darkening_model
if passband in self.passband_list:
self.passband = passband
            # IMPORTANT: the whole function (a function pointer) is stored in the variable
            # self.passband_model here, so it can later be called simply as self.passband_model(var)
self.set_passband_model()
self.set_passband_range()
else:
if self.verbose:
print(Fn.color_string("error", "ValueError: ") +
"In class Observer, function __init__(), line " + str(
Fn.lineno()) + ". Variable `passband` is invalid.")
self.exception.append("ValueError: In class Observer, function __init__(), line " + str(
Fn.lineno()) + ". Variable `passband` is invalid.")
self.init = False
if observe is not None:
self.observe = observe
else:
if self.verbose:
print(Fn.color_string("warning", "Warning: ") +
"In class Observer, function __init__(), line " + str(
Fn.lineno()) + ". Nothing to observer.")
self.exception.append(
"Warning: In class Observer, function __init__(), line " + str(Fn.lineno()) + ". Nothing to observer.")
self.init = False
@classmethod
def limb_darkening_linear(cls, gamma, xlin):
return 1.0 - (xlin * (1. - abs(np.cos(gamma))))
@classmethod
def limb_darkening_logarithmic(cls, gamma, xlog, ylog):
return 1.0 - (xlog * (1.0 - abs(np.cos(gamma)))) - (ylog * abs(np.cos(gamma)) * np.log10(abs(np.cos(gamma))))
@classmethod
def limb_darkening_sqrt(cls, gamma, xsqrt, ysqrt):
return 1.0 - (xsqrt * (1.0 - abs(np.cos(gamma)))) - (ysqrt * (1.0 - np.sqrt(abs(np.cos(gamma)))))
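    # A minimal worked example for the three laws above (hedged: the coefficient values
    # are illustrative only, not taken from any limb darkening catalogue). For
    # gamma = 60 deg, |cos(gamma)| = 0.5, so the linear law with xlin = 0.5 gives:
    #     Observer.limb_darkening_linear(gamma=np.radians(60.0), xlin=0.5)
    #     # -> 1.0 - 0.5 * (1.0 - 0.5) = 0.75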
@classmethod
def limb_darkening_coefficients(
cls,
            limb_darkening_model=None,
passband=None,
metallicity=None,
temperature=None,
gravity=None,
interpolation_method="nearest",
verbose=False
):
if verbose:
print(Fn.color_string("info", "Info: ") + "Computing limb darkening coefficients.")
        # fetch the complete limb darkening coefficient table needed for the interpolation
ldc_table = np.array(
            cls.limb_darkening_table(verbose=verbose, passband=passband, limb_darkening_model=limb_darkening_model))
        # points for the interpolation
points, values = [], []
        if limb_darkening_model == 'linear':
for item in ldc_table:
points.append([item[0], item[1], item[2]]) # [temperature, gravity, metallicity]
values.append(item[3]) # [xlin]
else:
values = [[], []]
for item in ldc_table:
points.append(np.array([item[0], item[1], item[2]])) # [temperature, gravity, metallicity]
values[0].append(item[3])
values[1].append(item[4])
from scipy import interpolate
        # the values at which to interpolate
uvw = np.array(
[np.array([temp, np.log10(grav), metallicity]) for temp, grav in list(zip(temperature, gravity))])
if limb_darkeing_model == "linear":
coefficients = interpolate.griddata(np.array(points), np.array(values), uvw, method=interpolation_method)
else:
x = interpolate.griddata(np.array(points), np.array(values[0]), uvw, method=interpolation_method)
y = interpolate.griddata(np.array(points), np.array(values[1]), uvw, method=interpolation_method)
coefficients = np.array(list(zip(*[x, y])))
if len(coefficients) != len(uvw):
if verbose:
print(Fn.color_string("error",
"Error: ") + "Error has been occured durring iterpolation. Length of input and output array is not same.")
return False
return coefficients
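    # A hedged note on the return shape of limb_darkening_coefficients() (inferred from
    # the code above, not from any upstream documentation): the linear law yields a 1-D
    # array with one xlin coefficient per input (temperature, gravity) point, while the
    # logarithmic and sqrt laws yield an (N, 2) array of coefficient pairs.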
@classmethod
def limb_darkening_table(
cls,
limb_darkening_model=None,
passband=None,
verbose=False
):
# passband translator ------------------------------------------------------------------------------------------
        # since it is not certain which filter in the limbdarkening table corresponds to
        # Generic/Bessell.U (Johnson_U, Bessell_UX or another one), a small translator is kept here
inp = np.array(
["Generic/Bessell.U", "Generic/Bessell.B", "Generic/Bessell.V", "Generic/Bessell.R", "Generic/Bessell.I"])
outp = np.array(
["Johnson_U", "Johnson_B", "Johnson_V", "Johnson_R", "Johnson_I"])
index = np.where(inp == passband)[0]
if not Fn.empty(index):
passband = outp[index[0]]
# --------------------------------------------------------------------------------------------------------------
# dir_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
# import sqlite3
# conn = sqlite3.connect(dir_path + '/database/database.db')
# c = conn.cursor()
mysql_conn = MySQLdb.connect(host=gv.HOST, # your host, usually localhost
user=gv.USER, # your username
passwd=gv.PWD, # your password
db="elisa_assets") # name of the data base
c = mysql_conn.cursor()
        if limb_darkening_model == 'linear':
# qry = 'SELECT temperature, gravity, metallicity, xlin FROM limbdarkening WHERE filter = "' + str \
# (passband) + '" COLLATE NOCASE'
qry = 'SELECT temperature, gravity, metallicity, xlin FROM limbdarkening WHERE filter = "' + str \
(passband) + '"'
        elif limb_darkening_model == 'logarithmic':
# qry = 'SELECT temperature, gravity, metallicity, xlog, ylog FROM limbdarkening WHERE filter = "' + str \
# (passband) + '" COLLATE NOCASE'
qry = 'SELECT temperature, gravity, metallicity, xlog, ylog FROM limbdarkening WHERE filter = "' + str \
(passband) + '"'
        elif limb_darkening_model == 'sqrt':
# qry = 'SELECT temperature, gravity, metallicity, xsqrt, ysqrt FROM limbdarkening WHERE filter = "' + str \
# (passband) + '" COLLATE NOCASE'
qry = 'SELECT temperature, gravity, metallicity, xsqrt, ysqrt FROM limbdarkening WHERE filter = "' + str \
(passband) + '"'
else:
if verbose:
print(Fn.color_string("error", "ValueError: ") +
"In class: Observer, function: limb_darkening_table, line: " + str(Fn.lineno()) +
". There is no such limb darkening model. Use `linear`, `logarithmic` or `sqrt`.")
return False
c.execute(qry)
ret_val = np.array(c.fetchall()).tolist()
if Fn.empty(ret_val):
if verbose:
print(Fn.color_string("error", "EmptyVariableError: ") +
"In class: Observer, function: limb_darkening_table, line: " + str(Fn.lineno()) +
". Empty list, probably sqlite query problem.")
mysql_conn.close()
return False
mysql_conn.close()
return ret_val
@classmethod
def limb_darkening_factor(
cls,
faces_orientation=None,
limb_darkening_model=None,
limb_darkening_coefficients=None,
verbose=False
):
import objects.Geometry as Geo
gamma = np.array(
[Geo.angle(u=np.array([1.0, 0.0, 0.0]), v=normal, verbose=verbose) for normal in faces_orientation])
# gamma = np.array([0.0] * len(faces_orientation))
        # handled so that when only a single limb darkening coefficient is supplied, the
        # computation code below needs no change: the one-element array is simply tiled to the required length
if len(limb_darkening_coefficients) == 1:
if type(limb_darkening_coefficients) == type(np.array([])):
limb_darkening_coefficients = np.array(len(faces_orientation) * limb_darkening_coefficients.tolist())
else:
limb_darkening_coefficients = np.array(len(faces_orientation) * limb_darkening_coefficients)
if len(limb_darkening_coefficients) != len(faces_orientation):
if verbose:
print(Fn.color_string("error",
"Error: ") + "Length of variables `limb_darkeing_coefficients` and `faces_orientation` is not same.")
return False
ld_factor = None
if limb_darkening_model == "linear":
ld_factor = np.array(
[cls.limb_darkening_linear(gamma=g, xlin=ldc) for g, ldc in
list(zip(gamma, limb_darkening_coefficients))])
elif limb_darkening_model == "logarithmic":
ld_factor = np.array(
[cls.limb_darkening_logarithmic(gamma=g, xlog=ldc[0], ylog=ldc[1]) for g, ldc in
list(zip(gamma, limb_darkening_coefficients))])
elif limb_darkening_model == "sqrt":
ld_factor = np.array(
[cls.limb_darkening_sqrt(gamma=g, xsqrt=ldc[0], ysqrt=ldc[1]) for g, ldc in
list(zip(gamma, limb_darkening_coefficients))])
return [ld_factor, gamma]
@classmethod
def get_passband_table(
cls,
passband=None
):
# dir_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
# import sqlite3
# conn = sqlite3.connect(dir_path + '/database/database.db')
mysql_conn = MySQLdb.connect(host=gv.HOST, # your host, usually localhost
user=gv.USER, # your username
passwd=gv.PWD, # your password
db="elisa_assets") # name of the data base
c = mysql_conn.cursor()
order_by = ' ORDER BY wavelength ASC'
c.execute('SELECT wavelength, pass FROM passband WHERE filter = "' + passband + '"' + order_by)
ret_val = np.array(c.fetchall()).tolist()
mysql_conn.close()
return ret_val
@staticmethod
def passband_interpolation(
passband=None
):
passband = list(passband)
from scipy import interpolate
x, y = np.array(list(zip(*passband))[0]), np.array(list(zip(*passband))[1])
return interpolate.Akima1DInterpolator(x, y)
    # the bolometric method always returns 1.0
    # all other passbands return an interpolation function of wavelength, which is then used when
    # computing the radiation; when the bolometric flux is computed there is effectively no passband
    # set, so to avoid special-casing this in the intensity functions a passband function always
    # enters the equations - in the bolometric case it just multiplies the equation by 1.0
    # NOTE: this may slightly slow down the bolometric case, but that one is rarely used anyway, and
    # any other passband has to be evaluated numerically regardless, which costs more, so it does not matter
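    # e.g. (hedged illustration): with self.passband set to "bolometric",
    # self.passband_model(5500.0) simply returns 1.0, while for any other passband it
    # returns the interpolated transmission at the given wavelength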
@staticmethod
def bolometric(w):
        if False: return w  # this line exists only to keep PyCharm from complaining
return 1.0
def set_passband_range(self):
if self.verbose: print(Fn.color_string("info", "Info: ") + "Setting passband range.")
self.passband_range = self.passband_model_range(passband=self.passband)
def set_passband_model(self):
if self.verbose: print(Fn.color_string("info", "Info: ") + "Setting passband model.")
try:
if self.passband == "bolometric":
self.passband_model = self.bolometric
else:
table = self.get_passband_table(passband=self.passband)
self.passband_model = self.passband_interpolation(passband=table)
return True
except:
if self.verbose:
print(Fn.color_string("error", "Error: ") +
"In class Observer, function set_passband_model(), line " + str(
Fn.lineno()) + ". Error has been occurred. Problem occurred probably during passband interpolation.")
return False
def get_passband_model(self):
return self.passband_model
@classmethod
def passband_model_range(cls, passband=None):
pb = list(zip(*cls.get_passband_table(passband=passband)))[0] if passband != "bolometric" else [0.0, 10000.0]
return [min(pb), max(pb)]
def get_passband(self):
return self.passband
def get_passband_range(self):
return self.passband_range
def get_limb_darkening_model(self):
        return self.limb_darkening_model
def set_limb_darkening_model(self, limb_darkening_model=None):
        self.limb_darkening_model = limb_darkening_model
def compute_lightcurve(self, lightcurve_params=None, starmodel_params=None, limb_darkening_params=None,
postprocess_params=None, verbose=False):
        # stray line (keeps the otherwise unused `verbose` argument referenced)
if verbose: pass
if Fn.empty(lightcurve_params):
if self.verbose:
print(Fn.color_string("error", "Error: ") +
"In class Observer, function compute_lightcurve(), line " + str(
Fn.lineno()) + ". Variable `lightcurve_params` is empty.")
self.exception.append("Error: In class Observer, function compute_lightcurve(), line " + str(
Fn.lineno()) + ". Variable `lightcurve_params` is empty.")
return False
if Fn.empty(starmodel_params):
if self.verbose:
print(Fn.color_string("error", "Error: ") +
"In class Observer, function compute_lightcurve(), line " + str(
Fn.lineno()) + ". Variable `starmodel_params` is empty.")
self.exception.append("Error: In class Observer, function compute_lightcurve(), line " + str(
Fn.lineno()) + ". Variable `starmodel_params` is empty.")
return False
if Fn.empty(postprocess_params):
if self.verbose:
print(Fn.color_string("error", "Error: ") +
"In class Observer, function compute_lightcurve(), line " + str(
Fn.lineno()) + ". Variable `postprocess_params` is empty.")
self.exception.append("Error: In class Observer, function compute_lightcurve(), line " + str(
Fn.lineno()) + ". Variable `postprocess_params` is empty.")
return False
if not self.observe.init:
return False
# <import and definitions>
import objects.Geometry as Geo
import objects.Plot as Plt
binary_system = self.observe
primary = binary_system.primary
secondary = binary_system.secondary
orbit = binary_system.orbit
lightcurve, norm_value = [], False
        # the spots flag is set up front here so that the code below is already conditioned
        # on it when spot handling is extended in the future
        spots = primary.spots_meta is not None or secondary.spots_meta is not None
        # the spot metadata are reworked here by adding t_object to each entry, because the
        # triangulation code was built that way and requiring t_object when initialising a
        # Star object would be redundant for the user
if not Fn.empty(primary.spots_meta):
for i in range(0, len(primary.spots_meta)):
primary.spots_meta[i]["t_object"] = "primary"
if not Fn.empty(secondary.spots_meta):
for i in range(0, len(secondary.spots_meta)):
secondary.spots_meta[i]["t_object"] = "secondary"
        # flags whether spots remain on the components after recomputation (e.g. if there
        # was a single spot that could not be computed, none is left and it need not be
        # considered); the values are written here after the spots are created and
        # triangulated, and both default to False
spots_validation = {"primary": False, "secondary": False}
        # if there are no spots on the star and the code does not yet model dynamical
        # effects such as apsidal motion, the curve can, as a speed-up, be computed only
        # over phases 0.0 to 0.5 and then interpolated
# interpolated_lightcurve = True
        # default to True when the key is missing (preserves the original try/except behaviour)
        mirroring = postprocess_params.get("mirroring", True)
# < /import and definitions>
# radius, azimuth, true anomaly, phase
if orbit.get_eccentricity() > 0 or not mirroring:
ffp, tfp = lightcurve_params["from_photometric_phase"], lightcurve_params["to_photometric_phase"]
elif spots:
ffp, tfp = 0.0, 1.0
else:
ffp, tfp = 0.0, 0.5
# zbuffer
zbuffer = postprocess_params["zbuffer"]
orbital_motion = orbit.orbital_motion_beta(
from_photometric_phase=ffp, to_photometric_phase=tfp, n=lightcurve_params["n"],
adaptive_multiplicator=lightcurve_params["adaptive_multiplicator"],
            # multiplication of sampling points during eclipses in the orbital motion
adaptive_orbit_part=lightcurve_params["adaptive_orbit_part"], eccentricity=orbit.eccentricity,
argument_of_periastron=orbit.argument_of_periastron)
# motion_to_plot = [[item[0] * np.cos(item[1]), item[0] * np.sin(item[1])] for item in orbital_motion]
# Plt.plot_2d(points=motion_to_plot, grid=True)
if binary_system.system == "eb":
if orbit.eccentricity == 0.0 and primary.synchronicity_parameter == 1.0 and secondary.synchronicity_parameter == 1.0:
# zp = False if binary_system.binary_morph == "over-contact" else True
if binary_system.binary_morph == "detached" or binary_system.binary_morph == "semi-detached":
if starmodel_params["homo"]:
model = \
binary_system.get_3d_model_optimized(t_object="both", actual_distance=1.0,
critical_angle=np.pi / 2.0,
phi_steps=primary.phi_steps,
theta_steps=primary.theta_steps,
zero_point=True, homo=True)
primary_model, secondary_model = model["primary"], model["secondary"]
del model
else:
primary_model = \
binary_system.get_3d_model_optimized(t_object="primary", actual_distance=1.0,
critical_angle=np.pi / 2.0,
phi_steps=primary.phi_steps,
theta_steps=primary.theta_steps,
zero_point=True, homo=False)["primary"]
secondary_model = \
binary_system.get_3d_model_optimized(t_object="secondary", actual_distance=1.0,
critical_angle=np.pi / 2.0,
phi_steps=secondary.phi_steps,
theta_steps=secondary.theta_steps,
zero_point=True, homo=False)["secondary"]
if spots:
primary_spots, secondary_spots = [], []
spots_metadata = {"primary": [], "secondary": []}
                        if primary.spots_meta is not None:
primary_spots = binary_system.create_spots(meta=primary.spots_meta)["primary"]
for spot in primary_spots:
spots_metadata["primary"].append(spot["meta"])
                        if secondary.spots_meta is not None:
secondary_spots = binary_system.create_spots(meta=secondary.spots_meta)["secondary"]
for spot in secondary_spots:
spots_metadata["secondary"].append(spot["meta"])
spots = {"primary": primary_spots, "secondary": secondary_spots}
norms = \
{"primary": Geo.normal_estimation(binary_object=binary_system,
actual_distance=orbit.get_periastron_distance(),
vertices=np.array(primary_model), t_object="primary",
mode="in_point", verbose=True),
"secondary": Geo.normal_estimation(binary_object=binary_system,
actual_distance=orbit.get_periastron_distance(),
vertices=np.array(secondary_model),
t_object="secondary", mode="in_point", verbose=True)
}
triangulation = \
Geo.trispot(vertices={"primary": primary_model, "secondary": secondary_model},
norms=norms, spots=spots, binary_morph=binary_system.get_binary_morphology(),
metadata=spots_metadata)
primary.set_vertices(vertices=np.array(triangulation[1]["primary"]))
secondary.set_vertices(vertices=np.array(triangulation[1]["secondary"]))
primary.set_simplices(simplices=triangulation[2]["primary"])
secondary.set_simplices(simplices=triangulation[2]["secondary"])
primary.set_faces(faces=primary.get_vertices()[primary.get_simplices()])
secondary.set_faces(faces=secondary.get_vertices()[secondary.get_simplices()])
spots_validation["primary"] = True if not Fn.empty(triangulation[4]["primary"]) else False
spots_validation["secondary"] = True if not Fn.empty(triangulation[4]["secondary"]) else False
simplex_map, spots_meta = triangulation[3], triangulation[4]
                        # # <debug plotting primary>
# color, colors = ["r", "g", "b", "c", "y"], []
#
# for y in simplex_map["primary"]:
# if simplex_map["primary"][y][1] == -1:
# colors.append("w")
# else:
# colors.append(color[simplex_map["primary"][y][1]])
#
# Plt.plot_3d(vertices=None, faces=[primary.get_faces()], normals_view=False,
# points_view=False, faces_view=True, face_color=[colors], point_size=10.0,
# edge_color="k", elev=45, azim=45, save=False)
                        # # < /debug plotting>
#
                        # # <debug plotting secondary>
# color, colors = ["r", "g", "b", "c", "y"], []
#
# for y in simplex_map["secondary"]:
# if simplex_map["secondary"][y][1] == -1:
# colors.append("w")
# else:
# colors.append(color[simplex_map["secondary"][y][1]])
#
# Plt.plot_3d(vertices=None, faces=[secondary.get_faces()], normals_view=False,
# points_view=False, faces_view=True, face_color=[colors], point_size=10.0,
# edge_color="k", elev=45, azim=45, save=False)
                        # # < /debug plotting>
else:
primary.set_vertices(vertices=primary_model)
secondary.set_vertices(vertices=secondary_model)
del primary_model, secondary_model
                        # # <debug plotting>
# Plt.plot_3d(vertices=[primary.get_vertices(), secondary.get_vertices()], normals_view=False,
# points_view=True, faces_view=False, point_color="r", point_size=3.0,
# verbose=self.verbose)
                        # # < /debug plotting>
# convex hull triangulation
triangulation_primary = Geo.convex_hull_triangulation(vertices=primary.get_vertices(),
verbose=self.verbose)
triangulation_secondary = Geo.convex_hull_triangulation(vertices=secondary.get_vertices(),
verbose=self.verbose)
primary.set_faces(faces=triangulation_primary[1])
secondary.set_faces(faces=triangulation_secondary[1])
primary.set_simplices(simplices=triangulation_primary[0])
secondary.set_simplices(simplices=triangulation_secondary[0])
del triangulation_primary, triangulation_secondary
                        # # <debug plotting>
# Plt.plot_3d(faces=[primary.get_faces(), secondary.get_faces()], face_color="w",
# normals_view=False, points_view=False, faces_view=True, verbose=self.verbose,
# face_alpha=1.0, azim=30, elev=30, save=False,
# filename="./out/" + str(primary.effective_temperature) + "_" + str(primary.potential) + "_" + str(primary.mass) + "___" +
# str(secondary.effective_temperature) + "_" + str(secondary.potential) + "_" + str(secondary.mass) + "---" + str(orbit.orbital_period))
# exit()
                        # # < /debug plotting>
elif binary_system.binary_morph == "over-contact":
                    # complete the triangulation for the over-contact case
model = \
binary_system.get_3d_model_optimized(t_object="both", actual_distance=1.0,
critical_angle=np.pi / 4.0,
phi_steps=primary.phi_steps,
theta_steps=primary.theta_steps,
zero_point=False, homo=True)
system, x_separation = model["system"], model["separation"]
primary_model, secondary_model = model["primary"], model["secondary"]
del model
                    # # <debug plotting>
# Plt.plot_3d(vertices=[system], normals_view=False,
# points_view=True, faces_view=False, point_color="r", point_size=3.0,
# verbose=self.verbose)
                    # # < /debug plotting>
if spots:
primary_spots, secondary_spots = [], []
spots_metadata = {"primary": [], "secondary": []}
                        if primary.spots_meta is not None:
primary_spots = binary_system.create_spots(meta=primary.spots_meta)["primary"]
for spot in primary_spots:
spots_metadata["primary"].append(spot["meta"])
                        if secondary.spots_meta is not None:
secondary_spots = binary_system.create_spots(meta=secondary.spots_meta)["secondary"]
for spot in secondary_spots:
spots_metadata["secondary"].append(spot["meta"])
spots = {"primary": primary_spots, "secondary": secondary_spots}
norms = \
{"primary": Geo.normal_estimation(binary_object=binary_system,
actual_distance=orbit.get_periastron_distance(),
vertices=np.array(primary_model), t_object="primary",
mode="in_point", verbose=True),
"secondary": Geo.normal_estimation(binary_object=binary_system,
actual_distance=orbit.get_periastron_distance(),
vertices=np.array(secondary_model),
t_object="secondary", mode="in_point", verbose=True)
}
triangulation = \
Geo.trispot(vertices={"primary": primary_model, "secondary": secondary_model},
norms=norms, spots=spots, binary_morph=binary_system.get_binary_morphology(),
metadata=spots_metadata)
primary.set_vertices(vertices=np.array(triangulation[1]["primary"]))
secondary.set_vertices(vertices=np.array(triangulation[1]["secondary"]))
primary.set_simplices(simplices=triangulation[2]["primary"])
secondary.set_simplices(simplices=triangulation[2]["secondary"])
primary.set_faces(faces=primary.get_vertices()[primary.get_simplices()])
secondary.set_faces(faces=secondary.get_vertices()[secondary.get_simplices()])
spots_validation["primary"] = True if not Fn.empty(triangulation[4]["primary"]) else False
spots_validation["secondary"] = True if not Fn.empty(triangulation[4]["secondary"]) else False
simplex_map, spots_meta = triangulation[3], triangulation[4]
                        # # <debug plotting primary>
# color, colors = ["r", "g", "b", "c", "y"], []
#
# for y in simplex_map["primary"]:
# if simplex_map["primary"][y][1] == -1:
# colors.append("w")
# else:
# colors.append(color[simplex_map["primary"][y][1]])
#
# Plt.plot_3d(vertices=None, faces=[primary.get_faces()], normals_view=False,
# points_view=False, faces_view=True, face_color=[colors], point_size=10.0,
# edge_color="k", elev=45, azim=45, save=False)
                        # # < /debug plotting>
                        # # <debug plotting secondary>
# color, colors = ["r", "g", "b", "c", "y"], []
#
# for y in simplex_map["secondary"]:
# if simplex_map["secondary"][y][1] == -1:
# colors.append("w")
# else:
# colors.append(color[simplex_map["secondary"][y][1]])
#
# Plt.plot_3d(vertices=None, faces=[secondary.get_faces()], normals_view=False,
# points_view=False, faces_view=True, face_color=[colors], point_size=10.0,
# edge_color="k", elev=45, azim=45, save=False)
                        # # < /debug plotting>
else:
# normals necessary for triangulation
normal_vectors = \
Geo.normal_estimation(binary_object=binary_system, actual_distance=1.0,
vertices=np.array(system),
t_object="primary",
mode="in_point", verbose=False)
# Plt.plot_3d(normals=None, vertices=[system], faces=None, face_color="w", normals_view=False, points_view=True,
# faces_view=False, point_color="r", normal_color="w", point_size=3., verbose=True, face_alpha=1.,
# azim=30, elev=30)
# cgal triangulation
# print(os.path.dirname(os.path.realpath(__file__)))
# sys.exit()
triangulation = Geo.cgal_triangulation(normals=normal_vectors, points=system, verbose=False,
min_triangle_angle=primary.cgal["min_triangle_angle"],
max_triangle_size=primary.cgal["max_triangle_size"],
surface_aproximation_error=primary.cgal[
"surface_aproximation_error"],
to_average_spacing=primary.cgal["to_average_spacing"])
                        # # <debug plotting>
# Plt.plot_3d(faces=[triangulation[1]], face_color="w",
# normals_view=False, points_view=False, faces_view=True, verbose=self.verbose,
# face_alpha=1.0, azim=30, elev=30)
                        # # < /debug plotting>
triangulation = Geo.cgal_separation(cgal_simplex=triangulation, x_separation=x_separation)
del (system, normal_vectors)
primary.set_vertices(vertices=triangulation["vertices"]["primary"])
secondary.set_vertices(vertices=triangulation["vertices"]["secondary"])
primary.set_faces(faces=triangulation["faces"]["primary"])
secondary.set_faces(faces=triangulation["faces"]["secondary"])
primary.set_simplices(simplices=triangulation["simplices"]["primary"])
secondary.set_simplices(simplices=triangulation["simplices"]["secondary"])
faces_orientation_primary = Geo.face_orientation_beta(faces=primary.get_faces(),
binary_object=binary_system, t_object="primary",
actual_distance=1.0, verbose=self.verbose)
faces_orientation_secondary = Geo.face_orientation_beta(faces=secondary.get_faces(),
binary_object=binary_system,
t_object="secondary",
actual_distance=1.0, verbose=self.verbose)
primary.set_faces_orientation(faces_orientation=faces_orientation_primary)
secondary.set_faces_orientation(faces_orientation=faces_orientation_secondary)
del faces_orientation_primary, faces_orientation_secondary
            # toggle plotting of the normals on and off
if False:
                # THIS PART OF THE CODE IS ONLY NEEDED FOR PLOTTING THE NORMALS
                # they have to be rescaled and shifted to the face positions
                # normalisation to 1
unit_vectors_primary = \
Geo.vector_array_normalisation(vector_arr=primary.get_faces_orientation(), multi=15.0,
verbose=self.verbose)
unit_vectors_secondary = \
Geo.vector_array_normalisation(vector_arr=secondary.get_faces_orientation(), multi=15.0,
verbose=self.verbose)
faces_com_primary = Geo.center_of_mass(faces=primary.get_faces(), verbose=self.verbose)
faces_com_secondary = Geo.center_of_mass(faces=secondary.get_faces(), verbose=self.verbose)
translation_primary = \
Geo.vector_array_translation(vector_arr=unit_vectors_primary, translation_arr=faces_com_primary,
verbose=self.verbose)
translation_secondary = \
Geo.vector_array_translation(vector_arr=unit_vectors_secondary,
translation_arr=faces_com_secondary,
verbose=self.verbose)
                # colours for the normals
c_primary, c_secondary = ["#0000ff"] * len(translation_primary), ["#000055"] * len(
translation_secondary)
Plt.plot_3d(normals=[translation_primary, translation_secondary],
vertices=[faces_com_primary, faces_com_secondary],
faces=[primary.get_faces(), secondary.get_faces()], face_color="w",
normals_view=True, points_view=False, faces_view=True,
point_color=[c_primary, c_secondary],
normal_color=[c_primary, c_secondary], point_size=3.0, verbose=True, face_alpha=1.0,
azim=30, elev=30)
                # END OF THE NORMALS-PLOTTING SECTION
gradnorm_primary = Geo.gradient_norm(faces=primary.get_faces(), verbose=self.verbose,
binary_object=binary_system, actual_distance=1.0,
t_object="primary")
gradnorm_secondary = Geo.gradient_norm(faces=secondary.get_faces(), verbose=self.verbose,
binary_object=binary_system, actual_distance=1.0,
t_object="secondary")
primary.set_gradient_norm(gradient_norm=gradnorm_primary)
secondary.set_gradient_norm(gradient_norm=gradnorm_secondary)
del gradnorm_primary, gradnorm_secondary
primary.compute_gravity_distribution(gradnorm=primary.get_gradient_distribution())
secondary.compute_gravity_distribution(gradnorm=secondary.get_gradient_distribution())
            # <debug plotting gravity distribution primary>
# rgb_p = Fn.arr_to_rainbow(arr=primary.get_gravity_distribution(),
# minimum=min(primary.get_gravity_distribution()),
# maximum=max(primary.get_gravity_distribution()))
#
# hex_p = Fn.rgb_to_hex(color=rgb_p, sharp=True)
# Plt.plot_3d(faces=[primary.get_faces()], face_color=[hex_p], normals_view=False,
# points_view=False, faces_view=True, verbose=True, face_alpha=1., azim=30, elev=30)
            # < /debug plotting gravity distribution primary>
primary.gravity_darkening_factor_distribution()
secondary.gravity_darkening_factor_distribution()
primary.compute_polar_temperature()
secondary.compute_polar_temperature()
if spots_validation["primary"]:
primary.compute_temperature_distribution(simplex_map=simplex_map["primary"],
spots_meta=spots_meta["primary"])
else:
primary.compute_temperature_distribution()
if spots_validation["secondary"]:
secondary.compute_temperature_distribution(simplex_map=simplex_map["secondary"],
spots_meta=spots_meta["secondary"])
else:
secondary.compute_temperature_distribution()
            # # <debug plotting temperature distribution primary>
# rgb_p = Fn.arr_to_rainbow(arr=primary.get_temperature_distribution(),
# minimum=min(primary.get_temperature_distribution()),
# maximum=max(primary.get_temperature_distribution()))
# hex_p = Fn.rgb_to_hex(color=rgb_p, sharp=True)
# Plt.plot_3d(faces=[primary.get_faces()], face_color=[hex_p], normals_view=False,
# points_view=False, faces_view=True, verbose=True, face_alpha=1., azim=30, elev=30)
            # # < /debug plotting temperature distribution primary>
            # # < debug plotting temperature distribution for wuma>
# faces = np.concatenate((primary.get_faces(), secondary.get_faces()), 0)
# temperature = np.concatenate((primary.get_temperature_distribution(),
# secondary.get_temperature_distribution()), 0)
# rgb_p = Fn.arr_to_rainbow(arr=temperature,
# minimum=min(temperature),
# maximum=max(temperature))
# hex_p = Fn.rgb_to_hex(color=rgb_p, sharp=True)
#
# Plt.plot_3d(faces=[faces], face_color=[hex_p], normals_view=False,
# points_view=False, faces_view=True, verbose=True, face_alpha=1., azim=30, elev=30)
            # # < /debug plotting temperature distribution for wuma>
r_pwr = [primary.radiation_power(passband_model=self.get_passband_model(), passband=self.passband,
passband_range=self.get_passband_range(), wavelength_step=10.0),
secondary.radiation_power(passband_model=self.get_passband_model(), passband=self.passband,
passband_range=self.get_passband_range(), wavelength_step=10.0)
]
if Fn.empty(r_pwr[0]) or Fn.empty(r_pwr[1]):
if self.verbose:
print(Fn.color_string(color="error",
string="ValueError: ") + "In class: Observer, function: compute_lightcurve(), line: " + str(
Fn.lineno()) + ". Invalid value (`boolean`) encountered during flux computing.")
self.exception.append(
"ValueError: In class: Observer, function: compute_lightcurve(), line: " + str(
Fn.lineno()) + ". Invalid value (`boolean`) encountered during flux computing.")
return False
primary.set_radiation_power(radiation_power=r_pwr[0])
secondary.set_radiation_power(radiation_power=r_pwr[1])
del r_pwr
# import time
# import sys
# folder = str(time.time())
# info = [primary.get_info(output=True), "\n", secondary.get_info(output=True), "\n", orbit.get_info(output=True)]
# Io.save_csv(filename="info", datafolder="./out/" + folder, data=info, delim="")
#
            # # <debug plotting>
# Plt.plot_3d(faces=[primary.get_faces(), secondary.get_faces()], face_color="w",
# normals_view=False, points_view=False, faces_view=True, verbose=self.verbose,
# face_alpha=1.0, azim=30, elev=30, save=True,
# filename="./out/" + folder + "/" + "img", dpi=100)
            # # < /debug plotting>
            # <debug plotting radiation power distribution>
# rgb_p = Fn.arr_to_rainbow(arr=primary.get_radiation_power(),
# minimum=min(primary.get_radiation_power()),
# maximum=max(primary.get_radiation_power()))
# hex_p = Fn.rgb_to_hex(color=rgb_p, sharp=True)
# Plt.plot_3d(faces=[primary.get_faces()], face_color=[hex_p], normals_view=False,
# points_view=False, faces_view=True, verbose=True, face_alpha=1., azim=30, elev=30)
# rgb_s = Fn.arr_to_rainbow(arr=secondary.get_radiation_power(),
# minimum=min(secondary.get_radiation_power()),
# maximum=max(secondary.get_radiation_power()))
# hex_s = Fn.rgb_to_hex(color=rgb_s, sharp=True)
# Plt.plot_3d(faces=[secondary.get_faces()], face_color=[hex_s], normals_view=False,
# points_view=False, faces_view=True, verbose=True, face_alpha=1., azim=30, elev=30,
# x_range=[0, 2], y_range=[-1, 1], z_range=[-1, 1])
            # < /debug plotting radiation power distribution>
if not Fn.empty(limb_darkening_params, debug=False):
ld_temperature = [[limb_darkening_params["temperature_primary"]],
[limb_darkening_params["temperature_secondary"]]]
ld_gravity = [[limb_darkening_params["gravity_primary"]],
[limb_darkening_params["gravity_secondary"]]]
ld_metallicity = [limb_darkening_params["metallicity_primary"],
limb_darkening_params["metallicity_secondary"]]
else:
ld_temperature = [primary.get_temperature_distribution(), secondary.get_temperature_distribution()]
ld_gravity = [primary.get_gravity_distribution(), secondary.get_gravity_distribution()]
ld_metallicity = [primary.get_metallicity(), secondary.get_metallicity()]
ldc_primary = \
                self.limb_darkening_coefficients(limb_darkening_model=self.get_limb_darkening_model(),
passband=self.get_passband(), metallicity=ld_metallicity[0],
temperature=ld_temperature[0], gravity=ld_gravity[0],
interpolation_method=self.limb_darkening_interp_method,
verbose=self.verbose)
            # <debug plotting of the limb darkening coefficients>
# rgb_p = Fn.arr_to_rainbow(arr=ldc_primary,
# minimum=min(ldc_primary),
# maximum=max(ldc_primary))
# hex_p = Fn.rgb_to_hex(color=rgb_p, sharp=True)
# Plt.plot_3d(faces=[primary.get_faces()], face_color=[hex_p], normals_view=False,
# points_view=False, faces_view=True, verbose=True, face_alpha=1., azim=30, elev=30)
            # < /debug plotting of the limb darkening coefficients>
ldc_secondary = \
                self.limb_darkening_coefficients(limb_darkening_model=self.get_limb_darkening_model(),
passband=self.get_passband(), metallicity=ld_metallicity[1],
temperature=ld_temperature[1], gravity=ld_gravity[1],
interpolation_method=self.limb_darkening_interp_method,
verbose=self.verbose)
            # <debug plotting>
# Plt.plot_3d(faces=[def_position[0][0], def_position[0][1]], face_color="w",
# normals_view=False, points_view=False, faces_view=True, verbose=self.verbose,
# face_alpha=1.0, azim=0, elev=0)
            # < /debug plotting>
percentage = 0.0
percentage_step = 100.0 / len(orbital_motion)
if self.verbose:
sys.stdout.write(Fn.color_string("info", "Info: ") + "Making lightcurve... \n")
sys.stdout.write("\r\t\t%d%% done\t" % percentage)
sys.stdout.flush()
for orbital_position in orbital_motion:
if self.verbose:
sys.stdout.write("\r\t\t%d%% done\t" % percentage)
sys.stdout.flush()
percentage += percentage_step
# orbital_position[1] = np.pi - (np.pi / 10)
act_position = \
orbit.rotate_system(faces=[primary.get_faces(), secondary.get_faces()],
normals=[primary.get_faces_orientation(),
secondary.get_faces_orientation()],
vertices=[primary.get_vertices(), secondary.get_vertices()],
rotation_angle=orbital_position[1], inclination_rotation=True,
faces_rotation=True, inclination=orbit.get_inclination(), verbose=False)
                # # <debug plotting>
# Plt.plot_3d(faces=[act_position[0][0], act_position[0][1]], face_color="w",
# normals_view=False, points_view=False, faces_view=True, verbose=self.verbose,
# face_alpha=1.0, azim=0, elev=0)
# exit()
                # # < /debug plotting>
                # darkside_filter() returns values in the form
                # [faces[primary, secondary], normals[primary, secondary], indices[primary, secondary]]
                # the indices refer to the numbering of the original triangulation array
darksite_filter = \
Geo.darkside_filter(faces=[act_position[0][0], act_position[0][1]],
normals=[act_position[1][0], act_position[1][1]],
verbose=False)
                # # <debug plotting>
# Plt.plot_3d(faces=[darksite_filter[0][0], darksite_filter[0][1]], face_color="w",
# normals_view=False, points_view=False, faces_view=True, verbose=self.verbose,
# face_alpha=1.0, azim=0, elev=0)
                # # < /debug plotting>
                # eclipse_filter() returns values in the form
                # [idx of visible faces [primary, secondary], surface area of those faces [primary, secondary]]
                #
eclipse_filter = \
Geo.eclipse_filter(indices=[darksite_filter[2][0], darksite_filter[2][1]],
vertices=[act_position[2][0], act_position[2][1]],
simplices=[Fn.array_mask(primary.get_simplices(), darksite_filter[2][0]),
Fn.array_mask(secondary.get_simplices(), darksite_filter[2][1])],
orbital_angle=orbital_position[1], verbose=False, zbuffer=zbuffer,
resolution=1000)
                # if a component is not visible at all, its surface is set to False; this is used further below
surface_primary = False if len(eclipse_filter[0][0]) == 0 else eclipse_filter[1][0]
surface_secondary = False if len(eclipse_filter[0][1]) == 0 else eclipse_filter[1][1]
surface = [surface_primary, surface_secondary]
                # <debug plotting>
# Plt.plot_3d(faces=[eclipse_filter[2][0], eclipse_filter[2][1]], face_color="w",
# normals_view=False, points_view=False, faces_view=True, verbose=self.verbose,
# face_alpha=1.0, azim=0, elev=0)
                # < /debug plotting>
                # limb darkening factor for the filtered elements
                # limb_darkening_factor() returns the following structure:
                # [limb darkening factor of each element at the given position, angle between the element
                # normal and the vector towards the observer, i.e. the vector (1, 0, 0)]
ldf_primary, ldf_secondary = 0.0, 0.0
                if not isinstance(surface[0], bool):
ldf_primary = \
self.limb_darkening_factor(faces_orientation=Fn.array_mask(act_position[1][0],
eclipse_filter[0][0]),
limb_darkening_model=self.get_limb_darkening_model(),
limb_darkening_coefficients=Fn.array_mask(array=ldc_primary,
mask=eclipse_filter[0][
0]))
                if not isinstance(surface[1], bool):
ldf_secondary = \
self.limb_darkening_factor(faces_orientation=Fn.array_mask(act_position[1][1],
eclipse_filter[0][1]),
limb_darkening_model=self.get_limb_darkening_model(),
limb_darkening_coefficients=Fn.array_mask(array=ldc_secondary,
mask=eclipse_filter[0][
1]))
flux = \
                    [0.0 if isinstance(surface[0], bool)
else self.compute_flux(e_limb_darkening_factor=ldf_primary, e_surface=surface[0],
e_flux=Fn.array_mask(array=primary.get_radiation_power(),
mask=eclipse_filter[0][0])),
                     0.0 if isinstance(surface[1], bool)
else self.compute_flux(e_limb_darkening_factor=ldf_secondary, e_surface=surface[1],
e_flux=Fn.array_mask(array=secondary.get_radiation_power(),
mask=eclipse_filter[0][1]))]
lightcurve.append([orbital_position[3], (flux[0] + flux[1]) * (
(orbit.get_relative_semimajor_axis() * gv.SOLAR_RADIUS) ** 2)])
if self.verbose:
sys.stdout.write("\r\t\t100% done")
sys.stdout.write("\n")
elif binary_system.system == "te":
if binary_system.planet == "roche":
pass
elif binary_system.planet == "sphere":
primary.set_vertices(vertices=primary.get_3d_model(phi_steps=primary.phi_steps,
theta_steps=primary.theta_steps))
secondary.set_vertices(vertices=secondary.get_3d_model(phi_steps=secondary.phi_steps,
theta_steps=secondary.theta_steps,
radius=secondary.get_polar_radius(),
actual_distance=orbit.get_periastron_distance()))
                # <debug plotting>
Plt.plot_3d(vertices=[primary.get_vertices(), secondary.get_vertices()], normals_view=False,
points_view=True, faces_view=False, point_color="r", point_size=3.0,
verbose=self.verbose)
                # < /debug plotting>
# convex hull triangulation
triangulation_primary = Geo.convex_hull_triangulation(vertices=primary.get_vertices(),
verbose=self.verbose)
triangulation_secondary = Geo.convex_hull_triangulation(vertices=secondary.get_vertices(),
verbose=self.verbose)
primary.set_faces(faces=triangulation_primary[1])
secondary.set_faces(faces=triangulation_secondary[1])
primary.set_simplices(simplices=triangulation_primary[0])
secondary.set_simplices(simplices=triangulation_secondary[0])
del triangulation_primary, triangulation_secondary
faces_orientation_primary = Geo.face_orientation_beta(faces=primary.get_faces(),
binary_object=binary_system, t_object="primary",
actual_distance=orbit.periastron_distance,
verbose=self.verbose)
faces_orientation_secondary = Geo.face_orientation_beta(faces=secondary.get_faces(),
binary_object=binary_system,
t_object="secondary",
actual_distance=orbit.periastron_distance,
verbose=self.verbose)
primary.set_faces_orientation(faces_orientation=faces_orientation_primary)
secondary.set_faces_orientation(faces_orientation=faces_orientation_secondary)
del faces_orientation_primary, faces_orientation_secondary
                # the actual distance is not relevant here
gradnorm_primary = Geo.gradient_norm(faces=primary.get_faces(), verbose=self.verbose,
binary_object=binary_system, actual_distance=None,
t_object="primary")
primary.set_gradient_norm(gradient_norm=gradnorm_primary)
del gradnorm_primary
primary.compute_gravity_distribution(gradnorm=primary.get_gradient_distribution())
primary.gravity_darkening_factor_distribution()
primary.compute_polar_temperature()
primary.compute_temperature_distribution()
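                # NOTE: the distributions computed above are immediately overridden here
                # with uniform values, i.e. the spherical star is treated as isothermal
                # with constant surface gravity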
primary.set_temperature_distribution([primary.effective_temperature] * len(primary.get_simplices()))
primary.local_gravity = [primary.polar_gravity] * len(primary.get_simplices())
                # # <debug plotting temperature distribution primary>
# rgb_p = Fn.arr_to_rainbow(arr=primary.get_temperature_distribution(),
# minimum=min(primary.get_temperature_distribution()),
# maximum=max(primary.get_temperature_distribution()))
# hex_p = Fn.rgb_to_hex(color=rgb_p, sharp=True)
# Plt.plot_3d(faces=[primary.get_faces()], face_color=[hex_p], normals_view=False,
# points_view=False, faces_view=True, verbose=True, face_alpha=1., azim=30, elev=30)
                # # < /debug plotting temperature distribution primary>
r_pwr = [primary.radiation_power(passband_model=self.get_passband_model(), passband=self.passband,
passband_range=self.get_passband_range(), wavelength_step=10.0),
None]
if Fn.empty(r_pwr[0]):
if self.verbose:
print(Fn.color_string(color="error",
string="ValueError: ") + "In class: Observer, function: compute_lightcurve(), line: " + str(
Fn.lineno()) + ". Invalid value (`boolean`) encountered during flux computing.")
self.exception.append(
"ValueError: In class: Observer, function: compute_lightcurve(), line: " + str(
Fn.lineno()) + ". Invalid value (`boolean`) encountered during flux computing.")
return False
primary.set_radiation_power(radiation_power=r_pwr[0])
del r_pwr
if not Fn.empty(limb_darkening_params, debug=False):
ld_temperature = [[limb_darkening_params["temperature_primary"]]]
ld_gravity = [[limb_darkening_params["gravity_primary"]]]
ld_metallicity = [limb_darkening_params["metallicity_primary"]]
else:
ld_temperature = [primary.get_temperature_distribution()]
ld_gravity = [primary.get_gravity_distribution()]
ld_metallicity = [primary.get_metallicity()]
ldc_primary = \
                    self.limb_darkening_coefficients(limb_darkening_model=self.get_limb_darkening_model(),
passband=self.get_passband(), metallicity=ld_metallicity[0],
temperature=ld_temperature[0], gravity=ld_gravity[0],
interpolation_method=self.limb_darkening_interp_method,
verbose=self.verbose)
percentage = 0.0
percentage_step = 100.0 / len(orbital_motion)
if self.verbose:
sys.stdout.write(Fn.color_string("info", "Info: ") + "Making lightcurve... \n")
sys.stdout.write("\r\t\t%d%% done\t" % percentage)
sys.stdout.flush()
for orbital_position in orbital_motion:
delta_distance = orbital_position[0] - orbit.get_periastron_distance()
delta = np.array([delta_distance, 0.0, 0.0])
secondary_vertices = secondary.get_vertices() + delta
secondary_faces = secondary_vertices[secondary.get_simplices()]
if self.verbose:
sys.stdout.write("\r\t\t%d%% done\t" % percentage)
sys.stdout.flush()
percentage += percentage_step
# orbital_position[1] = np.pi - (np.pi / 10)
act_position = \
orbit.rotate_system(faces=[primary.get_faces(), secondary_faces],
normals=[primary.get_faces_orientation(),
secondary.get_faces_orientation()],
vertices=[primary.get_vertices(), secondary_vertices],
rotation_angle=orbital_position[1], inclination_rotation=True,
faces_rotation=True, inclination=orbit.get_inclination(), verbose=False)
                    # # <debug plotting temperature distribution primary>
# rgb_p = Fn.arr_to_rainbow(arr=primary.get_temperature_distribution(),
# minimum=min(primary.get_temperature_distribution()),
# maximum=max(primary.get_temperature_distribution()))
# hex_p = Fn.rgb_to_hex(color=rgb_p, sharp=True)
# Plt.plot_3d(faces=[primary.get_faces()], face_color=[hex_p], normals_view=False,
# points_view=False, faces_view=True, verbose=True, face_alpha=1.,
# azim=-np.degrees(orbital_position[1]), elev=0)
                    # # < /debug plotting temperature distribution primary>
                    # # <debug plotting>
# Plt.plot_3d(faces=[act_position[0][0], act_position[0][1]], face_color="w",
# normals_view=False, points_view=False, faces_view=True, verbose=self.verbose,
# face_alpha=1.0, azim=-90, elev=90)
                    # # < /debug plotting>
# from time import time
# ddp = [[v[0], v[1]]for v in act_position[2][0]]
# dds = [[v[0], v[1]] for v in act_position[2][1]]
# v = np.concatenate((ddp, dds), 0)
#
# from matplotlib import pyplot as plt
# plt.scatter(list(zip(*v))[0], list(zip(*v))[1], s=0.5)
# plt.axis("equal")
# plt.xlim([-2.5, 2.5])
# plt.ylim([-2.5, 2.5])
# plt.scatter(orbital_position[0] * np.cos(orbital_position[1]),
# orbital_position[0] * np.sin(orbital_position[1]),
# s=20, c="r")
#
# plt.grid(True)
# plt.savefig(str(time()) + ".png")
# continue
                    # darkside_filter() returns values in the form
                    # [faces[primary, secondary], normals[primary, secondary], indices[primary, secondary]]
                    # the indices refer to the numbering of the original triangulation array
darksite_filter = \
Geo.darkside_filter(faces=[act_position[0][0], act_position[0][1]],
normals=[act_position[1][0], act_position[1][1]],
verbose=False)
                    # # <debug plotting>
# Plt.plot_3d(faces=[darksite_filter[0][0], darksite_filter[0][1]], face_color="w",
# normals_view=False, points_view=False, faces_view=True, verbose=self.verbose,
# face_alpha=1.0, azim=0, elev=0)
                    # # < /debug plotting>
                    # eclipse_filter() returns values in the form
                    # [idx of visible faces [primary, secondary], surface area of those faces [primary, secondary]]
eclipse_filter = \
Geo.eclipse_filter(indices=[darksite_filter[2][0], darksite_filter[2][1]],
vertices=[act_position[2][0], act_position[2][1]],
simplices=[Fn.array_mask(primary.get_simplices(), darksite_filter[2][0]),
Fn.array_mask(secondary.get_simplices(), darksite_filter[2][1])],
orbital_angle=orbital_position[1], verbose=False, zbuffer=zbuffer,
resolution=500)
# eclipse_filter = [[[], []], [[], []], [[], []]]
# eclipse_filter[0][0] = darksite_filter[2][0]
# eclipse_filter[1][0] = Geo.triangle_surface_area(np.array(act_position[0][0])[darksite_filter[2][0]])
# eclipse_filter[2][0] = np.array(act_position[0][0])[darksite_filter[2][0]]
                    # if a component is not visible at all, its surface is set to False; this is used further below
surface_primary = False if len(eclipse_filter[0][0]) == 0 else eclipse_filter[1][0]
surface = [surface_primary, None]
ldf_primary, ldf_secondary = 0.0, 0.0
                    if not isinstance(surface[0], bool):
ldf_primary = \
self.limb_darkening_factor(faces_orientation=Fn.array_mask(act_position[1][0],
eclipse_filter[0][0]),
limb_darkening_model=self.get_limb_darkening_model(),
limb_darkening_coefficients=Fn.array_mask(array=ldc_primary,
mask=eclipse_filter[0][
0]))
flux = \
                        np.array([0.0 if isinstance(surface[0], bool)
else self.compute_flux(e_limb_darkening_factor=ldf_primary, e_surface=surface[0],
e_flux=Fn.array_mask(array=primary.get_radiation_power(),
mask=eclipse_filter[0][0])), None], dtype="float64")
lightcurve.append([orbital_position[3], (flux[0]) * (
(orbit.get_relative_semimajor_axis() * gv.SOLAR_RADIUS) ** 2)])
print(lightcurve[-1][1])
if self.verbose:
sys.stdout.write("\r\t\t100% done")
sys.stdout.write("\n")
try:
            # <curve interpolation>
if mirroring and not spots:
norm_value = Lc.akima_interpolation(from_photometric_phase=0.0,
to_photometric_phase=0.5,
lightcurve=lightcurve, mirror=True, spots=False)
norm_value = Lc.akima_interpolation(from_photometric_phase=0.25, to_photometric_phase=0.25,
lightcurve=norm_value, mirror=False, spots=False)
interp_start, interp_stop = lightcurve_params["from_photometric_phase"], lightcurve_params[
"to_photometric_phase"]
lightcurve = Lc.akima_interpolation(from_photometric_phase=interp_start,
to_photometric_phase=interp_stop,
lightcurve=lightcurve, mirror=True)
elif mirroring and spots:
norm_value = Lc.akima_interpolation(from_photometric_phase=0.0,
to_photometric_phase=1.0,
lightcurve=lightcurve, mirror=True, spots=True)
norm_value = Lc.akima_interpolation(from_photometric_phase=0.25, to_photometric_phase=0.25,
lightcurve=norm_value, mirror=False, spots=True)
interp_start, interp_stop = lightcurve_params["from_photometric_phase"], lightcurve_params[
"to_photometric_phase"]
lightcurve = Lc.akima_interpolation(from_photometric_phase=interp_start,
to_photometric_phase=interp_stop,
lightcurve=lightcurve, mirror=True, spots=True)
            # </lightcurve interpolation>
        except Exception:
            if self.verbose:
                print(Fn.color_string("error", "ValueError: ") +
                      "In class Observer, function compute_lightcurve(), line " + str(
                    Fn.lineno()) + ". An error occurred during lightcurve postprocessing")
print("Primary info:")
primary.get_info()
print("Secondary info:")
secondary.get_info()
print("Orbit info:")
orbit.get_info()
return False
# Plt.plot_2d(points=lightcurve, aspect="auto", save=True, line=True,
# filename="./out/" + folder + "/" + "lcurve_img_raw")
# Io.save_csv(filename="lc_raw", datafolder="./out/" + folder, data=lightcurve, delim="\t", rewrite=False)
try:
            # <minima fixing>
fix_minima = postprocess_params["fix_minima"]
if fix_minima:
eclipses = Lc.photometric_phase_of_eclipses(pp_primary=orbit.conjuction[0]["true_phase"],
pp_secondary=orbit.conjuction[1]["true_phase"],
lightcurve=lightcurve)
lightcurve = Lc.fix_minima(lightcurve=lightcurve, eclipses=eclipses)
            # </minima fixing>
        except Exception:
            if self.verbose:
                print(Fn.color_string("error", "ValueError: ") +
                      "In class Observer, function compute_lightcurve(), line " + str(
                    Fn.lineno()) + ". An error occurred while fixing the minima")
print("Primary info:")
primary.get_info()
print("Secondary info:")
secondary.get_info()
print("Orbit info:")
orbit.get_info()
return False
# Plt.plot_2d(points=lightcurve, aspect="auto", save=True, line=True,
# filename="./out/" + folder + "/" + "lcurve_img_fixed")
# Io.save_csv(filename="lc_fixed", datafolder="./out/" + folder, data=lightcurve, delim="\t", rewrite=False)
try:
            # <lightcurve smoothing>
smoothed_lightcurve = postprocess_params["gaussian_smooth"]
if smoothed_lightcurve:
lightcurve = Lc.gaussian_smooth(lightcurve=lightcurve)
            # </lightcurve smoothing>
        except Exception:
            if self.verbose:
                print(Fn.color_string("error", "ValueError: ") +
                      "In class Observer, function compute_lightcurve(), line " + str(
                    Fn.lineno()) + ". An error occurred during the smoothing process")
print("Primary info:")
primary.get_info()
print("Secondary info:")
secondary.get_info()
print("Orbit info:")
orbit.get_info()
return False
        # if the minima were fixed, the curve has to be re-interpolated so that it has the
        # number of points required for machine learning
if fix_minima:
interp_start, interp_stop = lightcurve_params["from_photometric_phase"], lightcurve_params[
"to_photometric_phase"]
lightcurve = Lc.akima_interpolation(from_photometric_phase=interp_start,
to_photometric_phase=interp_stop,
lightcurve=lightcurve, mirror=False)
Plt.plot_2d(points=lightcurve, aspect="auto", save=False, line=True)
if not Fn.empty(norm_value):
return [lightcurve, norm_value[0][1]]
else:
return [lightcurve, 1.0]
@classmethod
def compute_flux(cls, e_limb_darkening_factor=None, e_surface=None, e_flux=None):
# flux = 0.0
# for ldf, gamma, s, f in list(zip(e_limb_darkening_factor[0], e_limb_darkening_factor[1], e_surface, e_flux)):
# flux += ldf * np.cos(gamma) * f * s
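        # vectorized form of the commented loop above: sum over faces of ldf * S * F * cos(gamma)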
flux = np.sum(np.array(e_limb_darkening_factor[0]) * np.array(e_surface) * np.array(e_flux) * np.cos(
np.array(e_limb_darkening_factor[1])))
return flux
def get_exception(self):
return self.exception
|
SrNetoChan/Quantum-GIS
|
refs/heads/master
|
tests/src/python/test_qgstreewidgetitem.py
|
45
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsTreeWidgetItem.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '12/07/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
import qgis # NOQA switch sip api
from qgis.core import NULL
from qgis.gui import QgsTreeWidgetItem, QgsTreeWidgetItemObject
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtWidgets import QTreeWidget
from qgis.testing import start_app, unittest
try:
from qgis.PyQt.QtTest import QSignalSpy
use_signal_spy = True
except ImportError:
use_signal_spy = False
start_app()
class TestQgsTreeWidgetItem(unittest.TestCase):
def testGettersSetters(self):
""" test getters and setters """
i = QgsTreeWidgetItem()
# sort data should be empty by default
self.assertEqual(i.sortData(0), NULL)
i.setSortData(0, '5')
self.assertEqual(i.sortData(0), '5')
self.assertEqual(i.sortData(1), NULL)
i.setSortData(1, 'a')
self.assertEqual(i.sortData(0), '5')
self.assertEqual(i.sortData(1), 'a')
# should not be always on top by default
self.assertEqual(i.alwaysOnTopPriority(), -1)
i.setAlwaysOnTopPriority(1)
self.assertEqual(i.alwaysOnTopPriority(), 1)
def testSort(self):
""" test sort logic """
w = QTreeWidget()
i1 = QgsTreeWidgetItem(w)
i2 = QgsTreeWidgetItem(w)
        # should default to sorting by display text
i1.setText(0, '2')
i1.setText(1, 'b')
i1.setText(2, 'c')
i2.setText(0, '1')
i2.setText(1, 'a')
i2.setText(2, 'd')
w.sortItems(0, Qt.AscendingOrder)
self.assertEqual(i1 < i2, False)
self.assertEqual(i2 < i1, True)
w.sortItems(1, Qt.AscendingOrder)
self.assertEqual(i1 < i2, False)
self.assertEqual(i2 < i1, True)
w.sortItems(2, Qt.AscendingOrder)
self.assertEqual(i1 < i2, True)
self.assertEqual(i2 < i1, False)
# sortData should take precedence over display text
i1.setText(1, '2')
i1.setSortData(1, '200')
i2.setText(1, '3')
w.sortItems(1, Qt.AscendingOrder)
self.assertEqual(i1 < i2, False)
self.assertEqual(i2 < i1, True)
i2.setSortData(1, '300')
self.assertEqual(i1 < i2, True)
self.assertEqual(i2 < i1, False)
# test that nulls are sorted before other values
i1.setSortData(0, '2')
i2.setSortData(0, NULL)
w.sortItems(0, Qt.AscendingOrder)
self.assertEqual(i1 < i2, False)
self.assertEqual(i2 < i1, True)
# test numeric sorting
i1.setSortData(0, '02')
i2.setSortData(0, '005')
w.sortItems(0, Qt.AscendingOrder)
self.assertEqual(i1 < i2, True)
self.assertEqual(i2 < i1, False)
# numbers should come first
i2.setSortData(0, 'a')
self.assertEqual(i1 < i2, True)
self.assertEqual(i2 < i1, False)
i1.setSortData(0, 'a')
i2.setSortData(0, '5')
self.assertEqual(i1 < i2, False)
self.assertEqual(i2 < i1, True)
# always on top items should be first
i1.setSortData(0, 'a')
i2.setSortData(0, 'b')
i2.setAlwaysOnTopPriority(5)
self.assertEqual(i1 < i2, False)
self.assertEqual(i2 < i1, True)
i1.setAlwaysOnTopPriority(3)
self.assertEqual(i1 < i2, True)
self.assertEqual(i2 < i1, False)
# otherwise fall back to sort order
i2.setAlwaysOnTopPriority(3)
i1.setSortData(0, 'c')
self.assertEqual(i1 < i2, False)
self.assertEqual(i2 < i1, True)
class TestQgsTreeWidgetItemObject(unittest.TestCase):
@unittest.skipIf(not use_signal_spy, "No QSignalSpy available")
def testItemEdited(self):
""" test that itemEdited signal is correctly emitted"""
i = QgsTreeWidgetItemObject()
item_edited_spy = QSignalSpy(i.itemEdited)
i.setData(1, Qt.EditRole, 'a')
self.assertEqual(len(item_edited_spy), 1)
i.setData(1, Qt.EditRole, 'b')
self.assertEqual(len(item_edited_spy), 2)
if __name__ == '__main__':
unittest.main()
|
wonwon0/StrategyIA
|
refs/heads/dev
|
ai/STA/Strategy/StrategyBook.py
|
1
|
# Under MIT license, see LICENSE.txt
""" Livre des stratégies. """
from typing import List
from ai.STA.Strategy.Strategy import Strategy
from ai.STA.Strategy.indiana_jones import IndianaJones
from ai.STA.Strategy.HumanControl import HumanControl
from ai.STA.Strategy.SimpleDefense import SimpleDefense
from ai.STA.Strategy.SimpleOffense import SimpleOffense
from ai.STA.Strategy.DoNothing import DoNothing
from ai.STA.Strategy.WeirdmovementStrategy import WeirdmovementStrategy
from ai.STA.Strategy.TestTransitions import TestTransitions
from ai.STA.Strategy.PerpetualMovement import PerpetualMovement
from ai.STA.Strategy.TestPasses import TestPasses
from ai.STA.Strategy.TestRotateAround import TestRotateAround
from ai.STA.Strategy.robocup_choreography import RobocupChoreography
from ai.STA.Strategy.bamba_follow import BambaFollow
class StrategyBook(object):
"""
Cette classe est capable de récupérer les stratégies enregistrés dans
la configuration des stratégies et de les exposer au Behavior Tree en
charge de sélectionner la stratégie courante.
"""
def __init__(self):
"""
Initialise le dictionnaire des stratégies présentées au reste de l'IA.
"""
self.strategy_book = {'SimpleDefense': SimpleDefense,
'SimpleOffense': SimpleOffense,
'HumanControl': HumanControl,
'DoNothing': DoNothing,
'TestTransitions': TestTransitions,
'PerpetualMovement': PerpetualMovement,
'WeirdmovementStrategy': WeirdmovementStrategy,
"IndianaJones": IndianaJones,
"TestRotateAround": TestRotateAround,
'TestPasses': TestPasses,
'RobocupChoreography': RobocupChoreography,
'BambaFollow': BambaFollow,
}
def get_strategies_name_list(self) -> List[str]:
"""
Retourne une liste des noms des stratégies disponibles à l'IA.
:return: (List[str]) une liste de string, les noms des stratégies disponibles.
"""
return list(self.strategy_book.keys())
def get_strategy(self, strategy_name: str) -> Strategy:
"""
Retourne une instance nouvelle de la stratégie correspondant au nom passé.
:param strategy_name: (str) le nom de la stratégie à retourner
:return: (Tactic) une nouvelle instance de la stratégie demandé.
"""
if self.check_existance_strategy(strategy_name):
return self.strategy_book[strategy_name]
return self.strategy_book['DoNothing']
def check_existance_strategy(self, strategy_name: str) -> bool:
"""
Regarde que la stratégie existe dans le livre des stratégies.
:param strategy_name: (str) le nom de la stratégie à évaluer l'existance.
:return: (bool) true si la stratégie existe dans le livre, false sinon.
"""
assert isinstance(strategy_name, str)
return strategy_name in self.strategy_book
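# Example usage (illustrative):
#   book = StrategyBook()
#   strategy_class = book.get_strategy('SimpleDefense')  # falls back to DoNothing for unknown names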
|
eamontoyaa/pyCSS
|
refs/heads/master
|
examples/example02.py
|
1
|
'''
# Description.
This is a minimal module that performs a circular-arc slope stability
analysis for example number 02. #
'''
#-----------------------------------------------------------------------------#
### Add functions directory ###
import sys
sys.path += ['../functions']
#-----------------------------------------------------------------------------#
## Modules/Functions import
import numpy as np
import time
from onlyonecircle import onlyonecircle
#-----------------------------------------------------------------------------#
### Project data ###
projectName = 'Example-02'
projectAuthor = 'Exneyder A. Montoya Araque'
projectDate = time.strftime("%d/%m/%y")
#-----------------------------------------------------------------------------#
### Previous calculations ###
waterUnitWeight = [9.8, 'kN/m3']
materialDryUnitWeight = [13, 'kN/m3']
specificGravity = 2.4
moisture = 0.18
voidRatio = (waterUnitWeight[0]*specificGravity/materialDryUnitWeight[0])-1
materialUnitWeight = [specificGravity*(1+moisture)*waterUnitWeight[0]/\
(1+voidRatio), 'kN/m3']
materialSatUnitWeight = [(specificGravity+voidRatio)*waterUnitWeight[0]/\
(1+voidRatio), 'kN/m3']
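# Standard soil phase relations used above (for reference):
#   e = Gs*gamma_w/gamma_d - 1
#   gamma = Gs*(1 + w)*gamma_w/(1 + e)
#   gamma_sat = (Gs + e)*gamma_w/(1 + e)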
### Define inputs ###
# The slope geometry #
slopeHeight = [11.5, 'm']
slopeDip = np.array([3, 8])
crownDist = [15, 'm']
toeDist = [15, 'm']
wantAutomaticToeDepth = True
toeDepth = ['automatic toe Depth']
# The slip arc-circle #
hztDistPointAtCrownFromCrown = [-11, 'm']
hztDistPointAtToeFromCrown = [13.5, 'm']
slipRadius = [15.6, 'm']
# Water table depth #
wantWatertable = True
wtDepthAtCrown = [3.7, 'm']
toeUnderWatertable = False
# Materials properties #
waterUnitWeight = waterUnitWeight[:]
materialUnitWeight = materialSatUnitWeight[:]
frictionAngleGrad = [21, 'degrees']
cohesion = [4.5, 'kPa']
### Advanced inputs ###
# Divide the slip surface into constant-width slices? #
wantConstSliceWidthTrue = True
# Number of discretizations of slip surface. #
numSlices = 10
# Number of discretizations of circular arcs. #
nDivs = numSlices
# Select the method to calculate the safety factor ['Flns', 'Bshp' or 'Allm'] #
methodString = 'Bshp'
# Select the output image format ['.eps', '.jpeg', '.jpg', '.pdf', '.pgf', \ #
# '.png', '.ps', '.raw', '.rgba', '.svg', '.svgz', '.tif', '.tiff']. #
outputFormatImg = '.svg'
#-----------------------------------------------------------------------------#
# Operations for only one slip surface #
msg = onlyonecircle(projectName, projectAuthor, projectDate, slopeHeight, \
slopeDip, crownDist, toeDist, wantAutomaticToeDepth, toeDepth, \
hztDistPointAtCrownFromCrown, hztDistPointAtToeFromCrown, \
slipRadius, wantWatertable, wtDepthAtCrown, toeUnderWatertable, \
waterUnitWeight, materialUnitWeight, frictionAngleGrad, cohesion, \
wantConstSliceWidthTrue, numSlices, nDivs, methodString, \
outputFormatImg)
'''
BSD 2 license.
Copyright (c) 2016, Universidad Nacional de Colombia, Ludger O.
Suarez-Burgoa and Exneyder Andrés Montoya Araque.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
|
power12317/you-get
|
refs/heads/develop
|
src/you_get/util/log.py
|
20
|
#!/usr/bin/env python
# This file is Python 2 compliant.
from .. import __name__ as library_name
import os, sys
IS_ANSI_TERMINAL = os.getenv('TERM') in (
'eterm-color',
'linux',
'screen',
'vt100',
'xterm')
# ANSI escape code
# See <http://en.wikipedia.org/wiki/ANSI_escape_code>
RESET = 0
BOLD = 1
UNDERLINE = 4
NEGATIVE = 7
NO_BOLD = 21
NO_UNDERLINE = 24
POSITIVE = 27
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
LIGHT_GRAY = 37
DEFAULT = 39
BLACK_BACKGROUND = 40
RED_BACKGROUND = 41
GREEN_BACKGROUND = 42
YELLOW_BACKGROUND = 43
BLUE_BACKGROUND = 44
MAGENTA_BACKGROUND = 45
CYAN_BACKGROUND = 46
LIGHT_GRAY_BACKGROUND = 47
DEFAULT_BACKGROUND = 49
DARK_GRAY = 90 # xterm
LIGHT_RED = 91 # xterm
LIGHT_GREEN = 92 # xterm
LIGHT_YELLOW = 93 # xterm
LIGHT_BLUE = 94 # xterm
LIGHT_MAGENTA = 95 # xterm
LIGHT_CYAN = 96 # xterm
WHITE = 97 # xterm
DARK_GRAY_BACKGROUND = 100 # xterm
LIGHT_RED_BACKGROUND = 101 # xterm
LIGHT_GREEN_BACKGROUND = 102 # xterm
LIGHT_YELLOW_BACKGROUND = 103 # xterm
LIGHT_BLUE_BACKGROUND = 104 # xterm
LIGHT_MAGENTA_BACKGROUND = 105 # xterm
LIGHT_CYAN_BACKGROUND = 106 # xterm
WHITE_BACKGROUND = 107 # xterm
def sprint(text, *colors):
"""Format text with color or other effects into ANSI escaped string."""
return "\33[{}m{content}\33[{}m".format(";".join([str(color) for color in colors]), RESET, content=text) if IS_ANSI_TERMINAL and colors else text
def println(text, *colors):
"""Print text to standard output."""
sys.stdout.write(sprint(text, *colors) + "\n")
def print_err(text, *colors):
"""Print text to standard error."""
sys.stderr.write(sprint(text, *colors) + "\n")
def print_log(text, *colors):
"""Print a log message to standard error."""
sys.stderr.write(sprint("{}: {}".format(library_name, text), *colors) + "\n")
def i(message):
"""Print a normal log message."""
print_log(message)
def d(message):
"""Print a debug log message."""
print_log(message, BLUE)
def w(message):
"""Print a warning log message."""
print_log(message, YELLOW)
def e(message, exit_code=None):
"""Print an error log message."""
print_log(message, YELLOW, BOLD)
if exit_code is not None:
exit(exit_code)
def wtf(message, exit_code=1):
"""What a Terrible Failure!"""
print_log(message, RED, BOLD)
if exit_code is not None:
exit(exit_code)
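# Example usage (illustrative; assumes this module is imported as `log`):
#   log.i("fetching metadata")     # plain log line to stderr
#   log.w("slow connection")       # yellow warning
#   log.e("download failed", 1)    # bold yellow error, then exit(1)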
|
arborh/tensorflow
|
refs/heads/master
|
tensorflow/python/util/tf_inspect_test.py
|
11
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for tf_inspect."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import inspect
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def test_decorator(decorator_name, decorator_doc=None):
def make_tf_decorator(target):
return tf_decorator.TFDecorator(decorator_name, target, decorator_doc)
return make_tf_decorator
def test_undecorated_function():
pass
@test_decorator('decorator 1')
@test_decorator('decorator 2')
@test_decorator('decorator 3')
def test_decorated_function(x):
"""Test Decorated Function Docstring."""
return x * 2
@test_decorator('decorator')
def test_decorated_function_with_defaults(a, b=2, c='Hello'):
"""Test Decorated Function With Defaults Docstring."""
return [a, b, c]
@test_decorator('decorator')
class TestDecoratedClass(object):
"""Test Decorated Class."""
def __init__(self):
pass
def two(self):
return 2
class TfInspectTest(test.TestCase):
def testCurrentFrame(self):
self.assertEqual(inspect.currentframe(), tf_inspect.currentframe())
def testGetArgSpecOnDecoratorsThatDontProvideArgspec(self):
argspec = tf_inspect.getargspec(test_decorated_function_with_defaults)
self.assertEqual(['a', 'b', 'c'], argspec.args)
self.assertEqual((2, 'Hello'), argspec.defaults)
def testGetArgSpecOnDecoratorThatChangesArgspec(self):
argspec = tf_inspect.ArgSpec(
args=['a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(1, 'hello'))
decorator = tf_decorator.TFDecorator('', test_undecorated_function, '',
argspec)
self.assertEqual(argspec, tf_inspect.getargspec(decorator))
def testGetArgSpecIgnoresDecoratorsThatDontProvideArgspec(self):
argspec = tf_inspect.ArgSpec(
args=['a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(1, 'hello'))
inner_decorator = tf_decorator.TFDecorator('', test_undecorated_function,
'', argspec)
outer_decorator = tf_decorator.TFDecorator('', inner_decorator)
self.assertEqual(argspec, tf_inspect.getargspec(outer_decorator))
def testGetArgSpecReturnsOutermostDecoratorThatChangesArgspec(self):
outer_argspec = tf_inspect.ArgSpec(
args=['a'], varargs=None, keywords=None, defaults=None)
inner_argspec = tf_inspect.ArgSpec(
args=['b'], varargs=None, keywords=None, defaults=None)
inner_decorator = tf_decorator.TFDecorator('', test_undecorated_function,
'', inner_argspec)
outer_decorator = tf_decorator.TFDecorator('', inner_decorator, '',
outer_argspec)
self.assertEqual(outer_argspec, tf_inspect.getargspec(outer_decorator))
def testGetArgSpecOnPartialPositionalArgumentOnly(self):
"""Tests getargspec on partial function with only positional arguments."""
def func(m, n):
return 2 * m + n
partial_func = functools.partial(func, 7)
argspec = tf_inspect.ArgSpec(
args=['n'], varargs=None, keywords=None, defaults=None)
self.assertEqual(argspec, tf_inspect.getargspec(partial_func))
def testGetArgSpecOnPartialArgumentWithConvertibleToFalse(self):
"""Tests getargspec on partial function with args that convert to False."""
def func(m, n):
return 2 * m + n
partial_func = functools.partial(func, m=0)
exception_message = (r"Some arguments \['n'\] do not have default value, "
"but they are positioned after those with default "
"values. This can not be expressed with ArgSpec.")
with self.assertRaisesRegexp(ValueError, exception_message):
tf_inspect.getargspec(partial_func)
def testGetArgSpecOnPartialInvalidArgspec(self):
"""Tests getargspec on partial function that doesn't have valid argspec."""
def func(m, n, l, k=4):
return 2 * m + l + n * k
partial_func = functools.partial(func, n=7)
exception_message = (r"Some arguments \['l'\] do not have default value, "
"but they are positioned after those with default "
"values. This can not be expressed with ArgSpec.")
with self.assertRaisesRegexp(ValueError, exception_message):
tf_inspect.getargspec(partial_func)
def testGetArgSpecOnPartialValidArgspec(self):
"""Tests getargspec on partial function with valid argspec."""
def func(m, n, l, k=4):
return 2 * m + l + n * k
partial_func = functools.partial(func, n=7, l=2)
argspec = tf_inspect.ArgSpec(
args=['m', 'n', 'l', 'k'],
varargs=None,
keywords=None,
defaults=(7, 2, 4))
self.assertEqual(argspec, tf_inspect.getargspec(partial_func))
def testGetArgSpecOnPartialNoArgumentsLeft(self):
"""Tests getargspec on partial function that prunes all arguments."""
def func(m, n):
return 2 * m + n
partial_func = functools.partial(func, 7, 10)
argspec = tf_inspect.ArgSpec(
args=[], varargs=None, keywords=None, defaults=None)
self.assertEqual(argspec, tf_inspect.getargspec(partial_func))
def testGetArgSpecOnPartialKeywordArgument(self):
"""Tests getargspec on partial function that prunes some arguments."""
def func(m, n):
return 2 * m + n
partial_func = functools.partial(func, n=7)
argspec = tf_inspect.ArgSpec(
args=['m', 'n'], varargs=None, keywords=None, defaults=(7,))
self.assertEqual(argspec, tf_inspect.getargspec(partial_func))
def testGetArgSpecOnPartialKeywordArgumentWithDefaultValue(self):
"""Tests getargspec on partial function that prunes argument by keyword."""
def func(m=1, n=2):
return 2 * m + n
partial_func = functools.partial(func, n=7)
argspec = tf_inspect.ArgSpec(
args=['m', 'n'], varargs=None, keywords=None, defaults=(1, 7))
self.assertEqual(argspec, tf_inspect.getargspec(partial_func))
def testGetArgSpecOnPartialWithVarargs(self):
"""Tests getargspec on partial function with variable arguments."""
def func(m, *arg):
return m + len(arg)
partial_func = functools.partial(func, 7, 8)
argspec = tf_inspect.ArgSpec(
args=[], varargs='arg', keywords=None, defaults=None)
self.assertEqual(argspec, tf_inspect.getargspec(partial_func))
def testGetArgSpecOnPartialWithVarkwargs(self):
"""Tests getargspec on partial function with variable keyword arguments."""
def func(m, n, **kwarg):
return m * n + len(kwarg)
partial_func = functools.partial(func, 7)
argspec = tf_inspect.ArgSpec(
args=['n'], varargs=None, keywords='kwarg', defaults=None)
self.assertEqual(argspec, tf_inspect.getargspec(partial_func))
def testGetArgSpecOnPartialWithDecorator(self):
"""Tests getargspec on decorated partial function."""
@test_decorator('decorator')
def func(m=1, n=2):
return 2 * m + n
partial_func = functools.partial(func, n=7)
argspec = tf_inspect.ArgSpec(
args=['m', 'n'], varargs=None, keywords=None, defaults=(1, 7))
self.assertEqual(argspec, tf_inspect.getargspec(partial_func))
def testGetArgSpecOnPartialWithDecoratorThatChangesArgspec(self):
"""Tests getargspec on partial function with decorated argspec."""
argspec = tf_inspect.ArgSpec(
args=['a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(1, 'hello'))
decorator = tf_decorator.TFDecorator('', test_undecorated_function, '',
argspec)
partial_argspec = tf_inspect.ArgSpec(
args=['a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(2, 1, 'hello'))
partial_with_decorator = functools.partial(decorator, a=2)
self.assertEqual(argspec, tf_inspect.getargspec(decorator))
self.assertEqual(partial_argspec,
tf_inspect.getargspec(partial_with_decorator))
def testGetArgSpecOnCallableObject(self):
class Callable(object):
def __call__(self, a, b=1, c='hello'):
pass
argspec = tf_inspect.ArgSpec(
args=['self', 'a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(1, 'hello'))
test_obj = Callable()
self.assertEqual(argspec, tf_inspect.getargspec(test_obj))
def testGetArgSpecOnInitClass(self):
class InitClass(object):
def __init__(self, a, b=1, c='hello'):
pass
argspec = tf_inspect.ArgSpec(
args=['self', 'a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(1, 'hello'))
self.assertEqual(argspec, tf_inspect.getargspec(InitClass))
def testGetArgSpecOnNewClass(self):
class NewClass(object):
def __new__(cls, a, b=1, c='hello'):
pass
argspec = tf_inspect.ArgSpec(
args=['cls', 'a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(1, 'hello'))
self.assertEqual(argspec, tf_inspect.getargspec(NewClass))
def testGetFullArgSpecOnDecoratorsThatDontProvideFullArgSpec(self):
argspec = tf_inspect.getfullargspec(test_decorated_function_with_defaults)
self.assertEqual(['a', 'b', 'c'], argspec.args)
self.assertEqual((2, 'Hello'), argspec.defaults)
def testGetFullArgSpecOnDecoratorThatChangesFullArgSpec(self):
argspec = tf_inspect.FullArgSpec(
args=['a', 'b', 'c'],
varargs=None,
varkw=None,
defaults=(1, 'hello'),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
decorator = tf_decorator.TFDecorator('', test_undecorated_function, '',
argspec)
self.assertEqual(argspec, tf_inspect.getfullargspec(decorator))
def testGetFullArgSpecIgnoresDecoratorsThatDontProvideFullArgSpec(self):
argspec = tf_inspect.FullArgSpec(
args=['a', 'b', 'c'],
varargs=None,
varkw=None,
defaults=(1, 'hello'),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
inner_decorator = tf_decorator.TFDecorator('', test_undecorated_function,
'', argspec)
outer_decorator = tf_decorator.TFDecorator('', inner_decorator)
self.assertEqual(argspec, tf_inspect.getfullargspec(outer_decorator))
def testGetFullArgSpecReturnsOutermostDecoratorThatChangesFullArgSpec(self):
outer_argspec = tf_inspect.FullArgSpec(
args=['a'],
varargs=None,
varkw=None,
defaults=None,
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
inner_argspec = tf_inspect.FullArgSpec(
args=['b'],
varargs=None,
varkw=None,
defaults=None,
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
inner_decorator = tf_decorator.TFDecorator('', test_undecorated_function,
'', inner_argspec)
outer_decorator = tf_decorator.TFDecorator('', inner_decorator, '',
outer_argspec)
self.assertEqual(outer_argspec, tf_inspect.getfullargspec(outer_decorator))
def testGetFullArgsSpecForPartial(self):
def func(a, b):
del a, b
partial_function = functools.partial(func, 1)
argspec = tf_inspect.FullArgSpec(
args=['b'],
varargs=None,
varkw=None,
defaults=None,
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
self.assertEqual(argspec, tf_inspect.getfullargspec(partial_function))
def testGetFullArgSpecOnPartialNoArgumentsLeft(self):
"""Tests getfullargspec on partial function that prunes all arguments."""
def func(m, n):
return 2 * m + n
partial_func = functools.partial(func, 7, 10)
argspec = tf_inspect.FullArgSpec(
args=[],
varargs=None,
varkw=None,
defaults=None,
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
self.assertEqual(argspec, tf_inspect.getfullargspec(partial_func))
def testGetFullArgSpecOnPartialWithVarargs(self):
"""Tests getfullargspec on partial function with variable arguments."""
def func(m, *arg):
return m + len(arg)
partial_func = functools.partial(func, 7, 8)
argspec = tf_inspect.FullArgSpec(
args=[],
varargs='arg',
varkw=None,
defaults=None,
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
self.assertEqual(argspec, tf_inspect.getfullargspec(partial_func))
def testGetFullArgSpecOnPartialWithVarkwargs(self):
"""Tests getfullargspec.
Tests on partial function with variable keyword arguments.
"""
def func(m, n, **kwarg):
return m * n + len(kwarg)
partial_func = functools.partial(func, 7)
argspec = tf_inspect.FullArgSpec(
args=['n'],
varargs=None,
varkw='kwarg',
defaults=None,
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
self.assertEqual(argspec, tf_inspect.getfullargspec(partial_func))
def testGetFullArgSpecOnCallableObject(self):
class Callable(object):
def __call__(self, a, b=1, c='hello'):
pass
argspec = tf_inspect.FullArgSpec(
args=['self', 'a', 'b', 'c'],
varargs=None,
varkw=None,
defaults=(1, 'hello'),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
test_obj = Callable()
self.assertEqual(argspec, tf_inspect.getfullargspec(test_obj))
def testGetFullArgSpecOnInitClass(self):
class InitClass(object):
def __init__(self, a, b=1, c='hello'):
pass
argspec = tf_inspect.FullArgSpec(
args=['self', 'a', 'b', 'c'],
varargs=None,
varkw=None,
defaults=(1, 'hello'),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
self.assertEqual(argspec, tf_inspect.getfullargspec(InitClass))
def testGetFullArgSpecOnNewClass(self):
class NewClass(object):
def __new__(cls, a, b=1, c='hello'):
pass
argspec = tf_inspect.FullArgSpec(
args=['cls', 'a', 'b', 'c'],
varargs=None,
varkw=None,
defaults=(1, 'hello'),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
self.assertEqual(argspec, tf_inspect.getfullargspec(NewClass))
def testGetDoc(self):
self.assertEqual('Test Decorated Function With Defaults Docstring.',
tf_inspect.getdoc(test_decorated_function_with_defaults))
def testGetFile(self):
self.assertTrue('tf_inspect_test.py' in tf_inspect.getfile(
test_decorated_function_with_defaults))
self.assertTrue('tf_decorator.py' in tf_inspect.getfile(
test_decorator('decorator')(tf_decorator.unwrap)))
def testGetMembers(self):
self.assertEqual(
inspect.getmembers(TestDecoratedClass),
tf_inspect.getmembers(TestDecoratedClass))
def testGetModule(self):
self.assertEqual(
inspect.getmodule(TestDecoratedClass),
tf_inspect.getmodule(TestDecoratedClass))
self.assertEqual(
inspect.getmodule(test_decorated_function),
tf_inspect.getmodule(test_decorated_function))
self.assertEqual(
inspect.getmodule(test_undecorated_function),
tf_inspect.getmodule(test_undecorated_function))
def testGetSource(self):
expected = '''@test_decorator('decorator')
def test_decorated_function_with_defaults(a, b=2, c='Hello'):
"""Test Decorated Function With Defaults Docstring."""
return [a, b, c]
'''
self.assertEqual(
expected, tf_inspect.getsource(test_decorated_function_with_defaults))
def testGetSourceFile(self):
self.assertEqual(
__file__,
tf_inspect.getsourcefile(test_decorated_function_with_defaults))
def testGetSourceLines(self):
expected = inspect.getsourcelines(
test_decorated_function_with_defaults.decorated_target)
self.assertEqual(
expected,
tf_inspect.getsourcelines(test_decorated_function_with_defaults))
def testIsBuiltin(self):
self.assertEqual(
tf_inspect.isbuiltin(TestDecoratedClass),
inspect.isbuiltin(TestDecoratedClass))
self.assertEqual(
tf_inspect.isbuiltin(test_decorated_function),
inspect.isbuiltin(test_decorated_function))
self.assertEqual(
tf_inspect.isbuiltin(test_undecorated_function),
inspect.isbuiltin(test_undecorated_function))
self.assertEqual(tf_inspect.isbuiltin(range), inspect.isbuiltin(range))
self.assertEqual(tf_inspect.isbuiltin(max), inspect.isbuiltin(max))
def testIsClass(self):
self.assertTrue(tf_inspect.isclass(TestDecoratedClass))
self.assertFalse(tf_inspect.isclass(test_decorated_function))
def testIsFunction(self):
self.assertTrue(tf_inspect.isfunction(test_decorated_function))
self.assertFalse(tf_inspect.isfunction(TestDecoratedClass))
def testIsMethod(self):
self.assertTrue(tf_inspect.ismethod(TestDecoratedClass().two))
self.assertFalse(tf_inspect.ismethod(test_decorated_function))
def testIsModule(self):
self.assertTrue(
tf_inspect.ismodule(inspect.getmodule(inspect.currentframe())))
self.assertFalse(tf_inspect.ismodule(test_decorated_function))
def testIsRoutine(self):
self.assertTrue(tf_inspect.isroutine(len))
self.assertFalse(tf_inspect.isroutine(TestDecoratedClass))
def testStack(self):
expected_stack = inspect.stack()
actual_stack = tf_inspect.stack()
self.assertEqual(len(expected_stack), len(actual_stack))
self.assertEqual(expected_stack[0][0], actual_stack[0][0]) # Frame object
self.assertEqual(expected_stack[0][1], actual_stack[0][1]) # Filename
self.assertEqual(expected_stack[0][2],
actual_stack[0][2] - 1) # Line number
self.assertEqual(expected_stack[0][3], actual_stack[0][3]) # Function name
self.assertEqual(expected_stack[1:], actual_stack[1:])
class TfInspectGetCallArgsTest(test.TestCase):
def testReturnsEmptyWhenUnboundFuncHasNoParameters(self):
def empty():
pass
self.assertEqual({}, tf_inspect.getcallargs(empty))
def testClashingParameterNames(self):
def func(positional, func=1, func_and_positional=2, kwargs=3):
return positional, func, func_and_positional, kwargs
kwargs = {}
self.assertEqual(
tf_inspect.getcallargs(func, 0, **kwargs), {
'positional': 0,
'func': 1,
'func_and_positional': 2,
'kwargs': 3
})
kwargs = dict(func=4, func_and_positional=5, kwargs=6)
self.assertEqual(
tf_inspect.getcallargs(func, 0, **kwargs), {
'positional': 0,
'func': 4,
'func_and_positional': 5,
'kwargs': 6
})
def testUnboundFuncWithOneParamPositional(self):
def func(a):
return a
self.assertEqual({'a': 5}, tf_inspect.getcallargs(func, 5))
def testUnboundFuncWithTwoParamsPositional(self):
def func(a, b):
return (a, b)
self.assertEqual({'a': 10, 'b': 20}, tf_inspect.getcallargs(func, 10, 20))
def testUnboundFuncWithOneParamKeyword(self):
def func(a):
return a
self.assertEqual({'a': 5}, tf_inspect.getcallargs(func, a=5))
def testUnboundFuncWithTwoParamsKeyword(self):
def func(a, b):
return (a, b)
self.assertEqual({'a': 6, 'b': 7}, tf_inspect.getcallargs(func, a=6, b=7))
def testUnboundFuncWithOneParamDefault(self):
def func(a=13):
return a
self.assertEqual({'a': 13}, tf_inspect.getcallargs(func))
def testUnboundFuncWithOneParamDefaultOnePositional(self):
def func(a=0):
return a
self.assertEqual({'a': 1}, tf_inspect.getcallargs(func, 1))
def testUnboundFuncWithTwoParamsDefaultOnePositional(self):
def func(a=1, b=2):
return (a, b)
self.assertEqual({'a': 5, 'b': 2}, tf_inspect.getcallargs(func, 5))
def testUnboundFuncWithTwoParamsDefaultTwoPositional(self):
def func(a=1, b=2):
return (a, b)
self.assertEqual({'a': 3, 'b': 4}, tf_inspect.getcallargs(func, 3, 4))
def testUnboundFuncWithOneParamDefaultOneKeyword(self):
def func(a=1):
return a
self.assertEqual({'a': 3}, tf_inspect.getcallargs(func, a=3))
def testUnboundFuncWithTwoParamsDefaultOneKeywordFirst(self):
def func(a=1, b=2):
return (a, b)
self.assertEqual({'a': 3, 'b': 2}, tf_inspect.getcallargs(func, a=3))
def testUnboundFuncWithTwoParamsDefaultOneKeywordSecond(self):
def func(a=1, b=2):
return (a, b)
self.assertEqual({'a': 1, 'b': 4}, tf_inspect.getcallargs(func, b=4))
def testUnboundFuncWithTwoParamsDefaultTwoKeywords(self):
def func(a=1, b=2):
return (a, b)
self.assertEqual({'a': 3, 'b': 4}, tf_inspect.getcallargs(func, a=3, b=4))
def testBoundFuncWithOneParam(self):
class Test(object):
def bound(self):
pass
t = Test()
self.assertEqual({'self': t}, tf_inspect.getcallargs(t.bound))
def testBoundFuncWithManyParamsAndDefaults(self):
class Test(object):
def bound(self, a, b=2, c='Hello'):
return (a, b, c)
t = Test()
self.assertEqual({
'self': t,
'a': 3,
'b': 2,
'c': 'Goodbye'
}, tf_inspect.getcallargs(t.bound, 3, c='Goodbye'))
def testClassMethod(self):
class Test(object):
@classmethod
def test(cls, a, b=3, c='hello'):
return (a, b, c)
self.assertEqual({
'cls': Test,
'a': 5,
'b': 3,
'c': 'goodbye'
}, tf_inspect.getcallargs(Test.test, 5, c='goodbye'))
def testUsesOutermostDecoratorsArgSpec(self):
def func():
pass
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
decorated = tf_decorator.make_decorator(
func,
wrapper,
decorator_argspec=tf_inspect.ArgSpec(
args=['a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(3, 'hello')))
self.assertEqual({
'a': 4,
'b': 3,
'c': 'goodbye'
}, tf_inspect.getcallargs(decorated, 4, c='goodbye'))
if __name__ == '__main__':
test.main()
|
tillahoffmann/tensorflow
|
refs/heads/master
|
tensorflow/contrib/distributions/python/kernel_tests/mvn_full_covariance_test.py
|
41
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MultivariateNormalFullCovariance."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib import distributions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
ds = distributions
rng = np.random.RandomState(42)
class MultivariateNormalFullCovarianceTest(test.TestCase):
def _random_pd_matrix(self, *shape):
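    # Build a random positive-definite matrix as L @ L^T, where L is a
    # lower-triangular Cholesky factor with a softplus-positive diagonal.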
mat = rng.rand(*shape)
chol = ds.matrix_diag_transform(mat, transform=nn_ops.softplus)
chol = array_ops.matrix_band_part(chol, -1, 0)
return math_ops.matmul(chol, chol, adjoint_b=True).eval()
def testRaisesIfInitializedWithNonSymmetricMatrix(self):
with self.test_session():
mu = [1., 2.]
sigma = [[1., 0.], [1., 1.]] # Nonsingular, but not symmetric
mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True)
with self.assertRaisesOpError("not symmetric"):
mvn.covariance().eval()
def testNamePropertyIsSetByInitArg(self):
with self.test_session():
mu = [1., 2.]
sigma = [[1., 0.], [0., 1.]]
mvn = ds.MultivariateNormalFullCovariance(mu, sigma, name="Billy")
self.assertEqual(mvn.name, "Billy")
def testDoesNotRaiseIfInitializedWithSymmetricMatrix(self):
with self.test_session():
mu = rng.rand(10)
sigma = self._random_pd_matrix(10, 10)
mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True)
# Should not raise
mvn.covariance().eval()
def testLogPDFScalarBatch(self):
with self.test_session():
mu = rng.rand(2)
sigma = self._random_pd_matrix(2, 2)
mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True)
x = rng.rand(2)
log_pdf = mvn.log_prob(x)
pdf = mvn.prob(x)
scipy_mvn = stats.multivariate_normal(mean=mu, cov=sigma)
expected_log_pdf = scipy_mvn.logpdf(x)
expected_pdf = scipy_mvn.pdf(x)
self.assertEqual((), log_pdf.get_shape())
self.assertEqual((), pdf.get_shape())
self.assertAllClose(expected_log_pdf, log_pdf.eval())
self.assertAllClose(expected_pdf, pdf.eval())
def testLogPDFScalarBatchCovarianceNotProvided(self):
with self.test_session():
mu = rng.rand(2)
mvn = ds.MultivariateNormalFullCovariance(
mu, covariance_matrix=None, validate_args=True)
x = rng.rand(2)
log_pdf = mvn.log_prob(x)
pdf = mvn.prob(x)
# Initialize a scipy_mvn with the default covariance.
scipy_mvn = stats.multivariate_normal(mean=mu, cov=np.eye(2))
expected_log_pdf = scipy_mvn.logpdf(x)
expected_pdf = scipy_mvn.pdf(x)
self.assertEqual((), log_pdf.get_shape())
self.assertEqual((), pdf.get_shape())
self.assertAllClose(expected_log_pdf, log_pdf.eval())
self.assertAllClose(expected_pdf, pdf.eval())
def testShapes(self):
with self.test_session():
mu = rng.rand(3, 5, 2)
covariance = self._random_pd_matrix(3, 5, 2, 2)
mvn = ds.MultivariateNormalFullCovariance(
mu, covariance, validate_args=True)
# Shapes known at graph construction time.
self.assertEqual((2,), tuple(mvn.event_shape.as_list()))
self.assertEqual((3, 5), tuple(mvn.batch_shape.as_list()))
# Shapes known at runtime.
self.assertEqual((2,), tuple(mvn.event_shape_tensor().eval()))
self.assertEqual((3, 5), tuple(mvn.batch_shape_tensor().eval()))
def _random_mu_and_sigma(self, batch_shape, event_shape):
    # This ensures sigma is positive definite.
mat_shape = batch_shape + event_shape + event_shape
mat = rng.randn(*mat_shape)
perm = np.arange(mat.ndim)
perm[-2:] = [perm[-1], perm[-2]]
sigma = np.matmul(mat, np.transpose(mat, perm))
mu_shape = batch_shape + event_shape
mu = rng.randn(*mu_shape)
return mu, sigma
def testKLBatch(self):
batch_shape = (2,)
event_shape = (3,)
with self.test_session():
mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)
mu_b, sigma_b = self._random_mu_and_sigma(batch_shape, event_shape)
mvn_a = ds.MultivariateNormalFullCovariance(
loc=mu_a,
covariance_matrix=sigma_a,
validate_args=True)
mvn_b = ds.MultivariateNormalFullCovariance(
loc=mu_b,
covariance_matrix=sigma_b,
validate_args=True)
kl = ds.kl_divergence(mvn_a, mvn_b)
self.assertEqual(batch_shape, kl.get_shape())
kl_v = kl.eval()
expected_kl_0 = _compute_non_batch_kl(mu_a[0, :], sigma_a[0, :, :],
mu_b[0, :], sigma_b[0, :])
expected_kl_1 = _compute_non_batch_kl(mu_a[1, :], sigma_a[1, :, :],
mu_b[1, :], sigma_b[1, :])
self.assertAllClose(expected_kl_0, kl_v[0])
self.assertAllClose(expected_kl_1, kl_v[1])
def _compute_non_batch_kl(mu_a, sigma_a, mu_b, sigma_b):
"""Non-batch KL for N(mu_a, sigma_a), N(mu_b, sigma_b)."""
# Check using numpy operations
# This mostly repeats the tensorflow code _kl_mvn_mvn(), but in numpy.
# So it is important to also check that KL(mvn, mvn) = 0.
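  # KL(N_a || N_b) = 0.5 * (tr(Sb^-1 Sa) + (mu_b - mu_a)^T Sb^-1 (mu_b - mu_a)
  #                         - k + ln(det(Sb) / det(Sa)))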
sigma_b_inv = np.linalg.inv(sigma_b)
t = np.trace(sigma_b_inv.dot(sigma_a))
q = (mu_b - mu_a).dot(sigma_b_inv).dot(mu_b - mu_a)
k = mu_a.shape[0]
l = np.log(np.linalg.det(sigma_b) / np.linalg.det(sigma_a))
return 0.5 * (t + q - k + l)
if __name__ == "__main__":
test.main()
|
jsoref/django
|
refs/heads/master
|
tests/empty/tests.py
|
537
|
from django.test import TestCase
from .models import Empty
class EmptyModelTests(TestCase):
def test_empty(self):
m = Empty()
self.assertIsNone(m.id)
m.save()
Empty.objects.create()
self.assertEqual(len(Empty.objects.all()), 2)
self.assertIsNotNone(m.id)
existing = Empty(m.id)
existing.save()
|
MattRijk/django-ecomsite
|
refs/heads/master
|
lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/charsetprober.py
|
3126
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import re
class CharSetProber:
def __init__(self):
pass
def reset(self):
self._mState = constants.eDetecting
def get_charset_name(self):
return None
def feed(self, aBuf):
pass
def get_state(self):
return self._mState
def get_confidence(self):
return 0.0
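    # The two filters below remove bytes that carry no charset signal:
    # runs of ASCII bytes (or of ASCII letters) are collapsed to a single space.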
def filter_high_bit_only(self, aBuf):
aBuf = re.sub(b'([\x00-\x7F])+', b' ', aBuf)
return aBuf
def filter_without_english_letters(self, aBuf):
aBuf = re.sub(b'([A-Za-z])+', b' ', aBuf)
return aBuf
def filter_with_english_letters(self, aBuf):
# TODO
return aBuf
|
SOKP/external_chromium_org
|
refs/heads/sokp-l5.1
|
build/android/pylib/device/adb_wrapper.py
|
36
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module wraps Android's adb tool.
This is a thin wrapper around the adb interface. Any additional complexity
should be delegated to a higher level (ex. DeviceUtils).
"""
import errno
import os
from pylib import cmd_helper
from pylib.device import decorators
from pylib.device import device_errors
_DEFAULT_TIMEOUT = 30
_DEFAULT_RETRIES = 2
def _VerifyLocalFileExists(path):
"""Verifies a local file exists.
Args:
path: Path to the local file.
Raises:
IOError: If the file doesn't exist.
"""
if not os.path.exists(path):
raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), path)
class AdbWrapper(object):
"""A wrapper around a local Android Debug Bridge executable."""
def __init__(self, device_serial):
"""Initializes the AdbWrapper.
Args:
device_serial: The device serial number as a string.
"""
self._device_serial = str(device_serial)
# pylint: disable=W0613
@classmethod
@decorators.WithTimeoutAndRetries
def _RunAdbCmd(cls, arg_list, timeout=None, retries=None, check_error=True):
cmd = ['adb'] + arg_list
exit_code, output = cmd_helper.GetCmdStatusAndOutput(cmd)
if exit_code != 0:
raise device_errors.AdbCommandFailedError(
cmd, 'returned non-zero exit code %s, output: %s' %
(exit_code, output))
# This catches some errors, including when the device drops offline;
# unfortunately adb is very inconsistent with error reporting so many
# command failures present differently.
if check_error and output[:len('error:')] == 'error:':
raise device_errors.AdbCommandFailedError(arg_list, output)
return output
# pylint: enable=W0613
def _DeviceAdbCmd(self, arg_list, timeout, retries, check_error=True):
"""Runs an adb command on the device associated with this object.
Args:
arg_list: A list of arguments to adb.
timeout: Timeout in seconds.
retries: Number of retries.
check_error: Check that the command doesn't return an error message. This
does NOT check the return code of shell commands.
Returns:
The output of the command.
"""
return self._RunAdbCmd(
['-s', self._device_serial] + arg_list, timeout=timeout,
retries=retries, check_error=check_error)
def __eq__(self, other):
"""Consider instances equal if they refer to the same device.
Args:
other: The instance to compare equality with.
Returns:
True if the instances are considered equal, false otherwise.
"""
return self._device_serial == str(other)
def __str__(self):
"""The string representation of an instance.
Returns:
The device serial number as a string.
"""
return self._device_serial
def __repr__(self):
return '%s(\'%s\')' % (self.__class__.__name__, self)
# TODO(craigdh): Determine the filter criteria that should be supported.
@classmethod
def GetDevices(cls, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
"""Get the list of active attached devices.
Args:
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
    Returns:
      A list of AdbWrapper instances.
"""
output = cls._RunAdbCmd(['devices'], timeout=timeout, retries=retries)
lines = [line.split() for line in output.split('\n')]
return [AdbWrapper(line[0]) for line in lines
if len(line) == 2 and line[1] == 'device']
def GetDeviceSerial(self):
"""Gets the device serial number associated with this object.
Returns:
Device serial number as a string.
"""
return self._device_serial
def Push(self, local, remote, timeout=60*5, retries=_DEFAULT_RETRIES):
"""Pushes a file from the host to the device.
Args:
local: Path on the host filesystem.
remote: Path on the device filesystem.
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
"""
_VerifyLocalFileExists(local)
self._DeviceAdbCmd(['push', local, remote], timeout, retries)
def Pull(self, remote, local, timeout=60*5, retries=_DEFAULT_RETRIES):
"""Pulls a file from the device to the host.
Args:
remote: Path on the device filesystem.
local: Path on the host filesystem.
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
"""
self._DeviceAdbCmd(['pull', remote, local], timeout, retries)
_VerifyLocalFileExists(local)
def Shell(self, command, expect_rc=None, timeout=_DEFAULT_TIMEOUT,
retries=_DEFAULT_RETRIES):
"""Runs a shell command on the device.
Args:
command: The shell command to run.
expect_rc: (optional) If set checks that the command's return code matches
this value.
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
Returns:
The output of the shell command as a string.
Raises:
device_errors.AdbCommandFailedError: If the return code doesn't match
|expect_rc|.
"""
if expect_rc is None:
actual_command = command
else:
actual_command = '%s; echo $?;' % command
output = self._DeviceAdbCmd(
['shell', actual_command], timeout, retries, check_error=False)
if expect_rc is not None:
output_end = output.rstrip().rfind('\n') + 1
rc = output[output_end:].strip()
output = output[:output_end]
if int(rc) != expect_rc:
raise device_errors.AdbCommandFailedError(
['shell', command],
'shell command exited with code: %s' % rc,
self._device_serial)
return output
def Logcat(self, filter_spec=None, timeout=_DEFAULT_TIMEOUT,
retries=_DEFAULT_RETRIES):
"""Get the logcat output.
Args:
filter_spec: (optional) Spec to filter the logcat.
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
Returns:
logcat output as a string.
"""
cmd = ['logcat']
if filter_spec is not None:
cmd.append(filter_spec)
return self._DeviceAdbCmd(cmd, timeout, retries, check_error=False)
def Forward(self, local, remote, timeout=_DEFAULT_TIMEOUT,
retries=_DEFAULT_RETRIES):
"""Forward socket connections from the local socket to the remote socket.
Sockets are specified by one of:
tcp:<port>
localabstract:<unix domain socket name>
localreserved:<unix domain socket name>
localfilesystem:<unix domain socket name>
dev:<character device name>
jdwp:<process pid> (remote only)
Args:
local: The host socket.
remote: The device socket.
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
"""
self._DeviceAdbCmd(['forward', str(local), str(remote)], timeout, retries)
def JDWP(self, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
"""List of PIDs of processes hosting a JDWP transport.
Args:
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
Returns:
A list of PIDs as strings.
"""
return [a.strip() for a in
self._DeviceAdbCmd(['jdwp'], timeout, retries).split('\n')]
def Install(self, apk_path, forward_lock=False, reinstall=False,
sd_card=False, timeout=60*2, retries=_DEFAULT_RETRIES):
"""Install an apk on the device.
Args:
apk_path: Host path to the APK file.
forward_lock: (optional) If set forward-locks the app.
reinstall: (optional) If set reinstalls the app, keeping its data.
sd_card: (optional) If set installs on the SD card.
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
"""
_VerifyLocalFileExists(apk_path)
cmd = ['install']
if forward_lock:
cmd.append('-l')
if reinstall:
cmd.append('-r')
if sd_card:
cmd.append('-s')
cmd.append(apk_path)
output = self._DeviceAdbCmd(cmd, timeout, retries)
if 'Success' not in output:
raise device_errors.AdbCommandFailedError(cmd, output)
def Uninstall(self, package, keep_data=False, timeout=_DEFAULT_TIMEOUT,
retries=_DEFAULT_RETRIES):
"""Remove the app |package| from the device.
Args:
package: The package to uninstall.
keep_data: (optional) If set keep the data and cache directories.
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
"""
cmd = ['uninstall']
if keep_data:
cmd.append('-k')
cmd.append(package)
output = self._DeviceAdbCmd(cmd, timeout, retries)
if 'Failure' in output:
raise device_errors.AdbCommandFailedError(cmd, output)
def Backup(self, path, packages=None, apk=False, shared=False,
nosystem=True, include_all=False, timeout=_DEFAULT_TIMEOUT,
retries=_DEFAULT_RETRIES):
"""Write an archive of the device's data to |path|.
Args:
path: Local path to store the backup file.
      packages: List of packages to back up.
apk: (optional) If set include the .apk files in the archive.
      shared: (optional) If set, back up the device's SD card.
nosystem: (optional) If set exclude system applications.
include_all: (optional) If set back up all installed applications and
|packages| is optional.
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
"""
cmd = ['backup', path]
if apk:
cmd.append('-apk')
if shared:
cmd.append('-shared')
if nosystem:
cmd.append('-nosystem')
if include_all:
cmd.append('-all')
if packages:
cmd.extend(packages)
assert bool(packages) ^ bool(include_all), (
'Provide \'packages\' or set \'include_all\' but not both.')
ret = self._DeviceAdbCmd(cmd, timeout, retries)
_VerifyLocalFileExists(path)
return ret
def Restore(self, path, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
"""Restore device contents from the backup archive.
Args:
path: Host path to the backup archive.
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
"""
_VerifyLocalFileExists(path)
self._DeviceAdbCmd(['restore'] + [path], timeout, retries)
def WaitForDevice(self, timeout=60*5, retries=_DEFAULT_RETRIES):
"""Block until the device is online.
Args:
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
"""
self._DeviceAdbCmd(['wait-for-device'], timeout, retries)
def GetState(self, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
"""Get device state.
Args:
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
Returns:
One of 'offline', 'bootloader', or 'device'.
"""
return self._DeviceAdbCmd(['get-state'], timeout, retries).strip()
def GetDevPath(self, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
"""Gets the device path.
Args:
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
Returns:
The device path (e.g. usb:3-4)
"""
return self._DeviceAdbCmd(['get-devpath'], timeout, retries)
def Remount(self, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
"""Remounts the /system partition on the device read-write."""
self._DeviceAdbCmd(['remount'], timeout, retries)
def Reboot(self, to_bootloader=False, timeout=60*5,
retries=_DEFAULT_RETRIES):
"""Reboots the device.
Args:
to_bootloader: (optional) If set reboots to the bootloader.
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
"""
if to_bootloader:
cmd = ['reboot-bootloader']
else:
cmd = ['reboot']
self._DeviceAdbCmd(cmd, timeout, retries)
def Root(self, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
"""Restarts the adbd daemon with root permissions, if possible.
Args:
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
"""
output = self._DeviceAdbCmd(['root'], timeout, retries)
if 'cannot' in output:
raise device_errors.AdbCommandFailedError(['root'], output)
|
marcusrehm/serenata-de-amor
|
refs/heads/master
|
jarbas/core/migrations/0023_add_last_update_field_to_reimbursements.py
|
2
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-25 19:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0022_remove_unique_together_from_reimbursement'),
]
operations = [
migrations.AddField(
model_name='reimbursement',
name='last_update',
field=models.DateTimeField(auto_now=True, db_index=True, verbose_name='Last update'),
),
]
|
tylert/chirp.hg
|
refs/heads/master
|
chirp/drivers/id51.py
|
2
|
# Copyright 2012 Dan Smith <dsmith@danplanet.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from chirp.drivers import id31
from chirp import directory, bitwise
MEM_FORMAT = """
struct {
u24 freq;
u16 offset;
u16 rtone:6,
ctone:6,
unknown2:1,
mode:3;
u8 dtcs;
u8 tune_step:4,
unknown5:4;
u8 unknown4;
u8 tmode:4,
duplex:2,
dtcs_polarity:2;
char name[16];
u8 unknown13;
u8 urcall[7];
u8 rpt1call[7];
u8 rpt2call[7];
} memory[500];
#seekto 0x6A40;
u8 used_flags[70];
#seekto 0x6A86;
u8 skip_flags[69];
#seekto 0x6ACB;
u8 pskp_flags[69];
#seekto 0x6B40;
struct {
u8 bank;
u8 index;
} banks[500];
#seekto 0x6FD0;
struct {
char name[16];
} bank_names[26];
#seekto 0xA8C0;
struct {
u24 freq;
u16 offset;
u8 unknown1[3];
u8 call[7];
char name[16];
char subname[8];
u8 unknown3[10];
} repeaters[750];
#seekto 0x1384E;
struct {
u8 call[7];
} rptcall[750];
#seekto 0x14E60;
struct {
char call[8];
char tag[4];
} mycall[6];
#seekto 0x14EA8;
struct {
char call[8];
} urcall[200];
"""
LOG = logging.getLogger(__name__)
@directory.register
class ID51Radio(id31.ID31Radio):
"""Icom ID-51"""
MODEL = "ID-51"
_memsize = 0x1FB40
_model = "\x33\x90\x00\x01"
_endframe = "Icom Inc\x2E\x44\x41"
_ranges = [(0x00000, 0x1FB40, 32)]
MODES = {0: "FM", 1: "NFM", 3: "AM", 5: "DV"}
@classmethod
def match_model(cls, filedata, filename):
"""Given contents of a stored file (@filedata), return True if
this radio driver handles the represented model"""
# The default check for ICOM is just to check memory size
# Since the ID-51 and ID-51 Plus/Anniversary have exactly
# the same memory size, we need to do a more detailed check.
if len(filedata) == cls._memsize:
            LOG.debug('File has correct memory size, '
                      'checking 32 bytes at offset 0x1AF40')
snip = filedata[0x1AF40:0x1AF60]
if snip == ('\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF'
'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF'
'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF'
'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF'):
LOG.debug('bytes matched ID-51 Signature')
return True
else:
LOG.debug('bytes did not match ID-51 Signature')
return False
def get_features(self):
rf = super(ID51Radio, self).get_features()
rf.valid_bands = [(108000000, 174000000), (400000000, 479000000)]
return rf
def process_mmap(self):
self._memobj = bitwise.parse(MEM_FORMAT, self._mmap)
|
ProfessorX/Config
|
refs/heads/master
|
.PyCharm30/system/python_stubs/-1247972723/PyQt4/QtGui/__init__/QAbstractScrollArea.py
|
2
|
# encoding: utf-8
# module PyQt4.QtGui
# from /usr/lib/python2.7/dist-packages/PyQt4/QtGui.so
# by generator 1.135
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
from QFrame import QFrame
class QAbstractScrollArea(QFrame):
""" QAbstractScrollArea(QWidget parent=None) """
def addScrollBarWidget(self, QWidget, Qt_Alignment): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.addScrollBarWidget(QWidget, Qt.Alignment) """
pass
def contextMenuEvent(self, QContextMenuEvent): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.contextMenuEvent(QContextMenuEvent) """
pass
def cornerWidget(self): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.cornerWidget() -> QWidget """
return QWidget
def dragEnterEvent(self, QDragEnterEvent): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.dragEnterEvent(QDragEnterEvent) """
pass
def dragLeaveEvent(self, QDragLeaveEvent): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.dragLeaveEvent(QDragLeaveEvent) """
pass
def dragMoveEvent(self, QDragMoveEvent): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.dragMoveEvent(QDragMoveEvent) """
pass
def dropEvent(self, QDropEvent): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.dropEvent(QDropEvent) """
pass
def event(self, QEvent): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.event(QEvent) -> bool """
return False
def horizontalScrollBar(self): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.horizontalScrollBar() -> QScrollBar """
return QScrollBar
def horizontalScrollBarPolicy(self): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.horizontalScrollBarPolicy() -> Qt.ScrollBarPolicy """
pass
def keyPressEvent(self, QKeyEvent): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.keyPressEvent(QKeyEvent) """
pass
def maximumViewportSize(self): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.maximumViewportSize() -> QSize """
pass
def minimumSizeHint(self): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.minimumSizeHint() -> QSize """
pass
def mouseDoubleClickEvent(self, QMouseEvent): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.mouseDoubleClickEvent(QMouseEvent) """
pass
def mouseMoveEvent(self, QMouseEvent): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.mouseMoveEvent(QMouseEvent) """
pass
def mousePressEvent(self, QMouseEvent): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.mousePressEvent(QMouseEvent) """
pass
def mouseReleaseEvent(self, QMouseEvent): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.mouseReleaseEvent(QMouseEvent) """
pass
def paintEvent(self, QPaintEvent): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.paintEvent(QPaintEvent) """
pass
def resizeEvent(self, QResizeEvent): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.resizeEvent(QResizeEvent) """
pass
def scrollBarWidgets(self, Qt_Alignment): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.scrollBarWidgets(Qt.Alignment) -> list-of-QWidget """
pass
def scrollContentsBy(self, p_int, p_int_1): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.scrollContentsBy(int, int) """
pass
def setCornerWidget(self, QWidget): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.setCornerWidget(QWidget) """
pass
def setHorizontalScrollBar(self, QScrollBar): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.setHorizontalScrollBar(QScrollBar) """
pass
def setHorizontalScrollBarPolicy(self, Qt_ScrollBarPolicy): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.setHorizontalScrollBarPolicy(Qt.ScrollBarPolicy) """
pass
def setupViewport(self, QWidget): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.setupViewport(QWidget) """
pass
def setVerticalScrollBar(self, QScrollBar): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.setVerticalScrollBar(QScrollBar) """
pass
def setVerticalScrollBarPolicy(self, Qt_ScrollBarPolicy): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.setVerticalScrollBarPolicy(Qt.ScrollBarPolicy) """
pass
def setViewport(self, QWidget): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.setViewport(QWidget) """
pass
def setViewportMargins(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QAbstractScrollArea.setViewportMargins(int, int, int, int)
QAbstractScrollArea.setViewportMargins(QMargins)
"""
pass
def sizeHint(self): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.sizeHint() -> QSize """
pass
def verticalScrollBar(self): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.verticalScrollBar() -> QScrollBar """
return QScrollBar
def verticalScrollBarPolicy(self): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.verticalScrollBarPolicy() -> Qt.ScrollBarPolicy """
pass
def viewport(self): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.viewport() -> QWidget """
return QWidget
def viewportEvent(self, QEvent): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.viewportEvent(QEvent) -> bool """
return False
def wheelEvent(self, QWheelEvent): # real signature unknown; restored from __doc__
""" QAbstractScrollArea.wheelEvent(QWheelEvent) """
pass
def __init__(self, QWidget_parent=None): # real signature unknown; restored from __doc__
pass
|
SINGROUP/pycp2k
|
refs/heads/master
|
pycp2k/classes/_configuration1.py
|
1
|
from pycp2k.inputsection import InputSection
class _configuration1(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Glb_conf = None
self.Sub_conf = None
self.Multiplicity = None
self.Charge = None
self._name = "CONFIGURATION"
self._keywords = {'Glb_conf': 'GLB_CONF', 'Multiplicity': 'MULTIPLICITY', 'Sub_conf': 'SUB_CONF', 'Charge': 'CHARGE'}
self._aliases = {'Multip': 'Multiplicity'}
@property
def Multip(self):
"""
See documentation for Multiplicity
"""
return self.Multiplicity
@Multip.setter
def Multip(self, value):
self.Multiplicity = value
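# Usage sketch (hedged; in practice pycp2k sections usually hang off a larger
# generated input tree rather than being instantiated directly):
#
#   conf = _configuration1()
#   conf.Multip = 1           # alias property, same as conf.Multiplicity = 1
#   assert conf.Multiplicity == 1
#   conf.Charge = 0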
|
pschmitt/home-assistant
|
refs/heads/dev
|
homeassistant/components/luftdaten/sensor.py
|
9
|
"""Support for Luftdaten sensors."""
import logging
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_SHOW_ON_MAP,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from . import (
DATA_LUFTDATEN,
DATA_LUFTDATEN_CLIENT,
DEFAULT_ATTRIBUTION,
DOMAIN,
SENSORS,
TOPIC_UPDATE,
)
from .const import ATTR_SENSOR_ID
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up a Luftdaten sensor based on a config entry."""
luftdaten = hass.data[DOMAIN][DATA_LUFTDATEN_CLIENT][entry.entry_id]
sensors = []
for sensor_type in luftdaten.sensor_conditions:
try:
name, icon, unit = SENSORS[sensor_type]
except KeyError:
_LOGGER.debug("Unknown sensor value type: %s", sensor_type)
continue
sensors.append(
LuftdatenSensor(
luftdaten, sensor_type, name, icon, unit, entry.data[CONF_SHOW_ON_MAP]
)
)
async_add_entities(sensors, True)
class LuftdatenSensor(Entity):
"""Implementation of a Luftdaten sensor."""
def __init__(self, luftdaten, sensor_type, name, icon, unit, show):
"""Initialize the Luftdaten sensor."""
self._async_unsub_dispatcher_connect = None
self.luftdaten = luftdaten
self._icon = icon
self._name = name
self._data = None
self.sensor_type = sensor_type
self._unit_of_measurement = unit
self._show_on_map = show
self._attrs = {}
@property
def icon(self):
"""Return the icon."""
return self._icon
@property
def state(self):
"""Return the state of the device."""
if self._data is not None:
return self._data[self.sensor_type]
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def should_poll(self):
"""Disable polling."""
return False
@property
def unique_id(self) -> str:
"""Return a unique, friendly identifier for this entity."""
if self._data is not None:
return f"{self._data['sensor_id']}_{self.sensor_type}"
@property
def device_state_attributes(self):
"""Return the state attributes."""
self._attrs[ATTR_ATTRIBUTION] = DEFAULT_ATTRIBUTION
if self._data is not None:
self._attrs[ATTR_SENSOR_ID] = self._data["sensor_id"]
on_map = ATTR_LATITUDE, ATTR_LONGITUDE
no_map = "lat", "long"
lat_format, lon_format = on_map if self._show_on_map else no_map
try:
self._attrs[lon_format] = self._data["longitude"]
self._attrs[lat_format] = self._data["latitude"]
return self._attrs
except KeyError:
return
async def async_added_to_hass(self):
"""Register callbacks."""
@callback
def update():
"""Update the state."""
self.async_schedule_update_ha_state(True)
self._async_unsub_dispatcher_connect = async_dispatcher_connect(
self.hass, TOPIC_UPDATE, update
)
async def async_will_remove_from_hass(self):
"""Disconnect dispatcher listener when removed."""
if self._async_unsub_dispatcher_connect:
self._async_unsub_dispatcher_connect()
async def async_update(self):
"""Get the latest data and update the state."""
try:
self._data = self.luftdaten.data[DATA_LUFTDATEN]
except KeyError:
return
|
asnorkin/sentiment_analysis
|
refs/heads/master
|
site/lib/python2.7/site-packages/numpy/core/getlimits.py
|
35
|
"""Machine limits for Float32 and Float64 and (long double) if available...
"""
from __future__ import division, absolute_import, print_function
__all__ = ['finfo', 'iinfo']
from .machar import MachAr
from . import numeric
from . import numerictypes as ntypes
from .numeric import array
def _frz(a):
"""fix rank-0 --> rank-1"""
if a.ndim == 0:
a.shape = (1,)
return a
_convert_to_float = {
ntypes.csingle: ntypes.single,
ntypes.complex_: ntypes.float_,
ntypes.clongfloat: ntypes.longfloat
}
class finfo(object):
"""
finfo(dtype)
Machine limits for floating point types.
Attributes
----------
bits : int
The number of bits occupied by the type.
eps : float
The smallest representable positive number such that
``1.0 + eps != 1.0``. Type of `eps` is an appropriate floating
point type.
epsneg : floating point number of the appropriate type
The smallest representable positive number such that
``1.0 - epsneg != 1.0``.
iexp : int
The number of bits in the exponent portion of the floating point
representation.
machar : MachAr
The object which calculated these parameters and holds more
detailed information.
machep : int
The exponent that yields `eps`.
max : floating point number of the appropriate type
The largest representable number.
maxexp : int
The smallest positive power of the base (2) that causes overflow.
min : floating point number of the appropriate type
The smallest representable number, typically ``-max``.
minexp : int
The most negative power of the base (2) consistent with there
being no leading 0's in the mantissa.
negep : int
The exponent that yields `epsneg`.
nexp : int
The number of bits in the exponent including its sign and bias.
nmant : int
The number of bits in the mantissa.
precision : int
The approximate number of decimal digits to which this kind of
float is precise.
resolution : floating point number of the appropriate type
The approximate decimal resolution of this type, i.e.,
``10**-precision``.
tiny : float
The smallest positive usable number. Type of `tiny` is an
appropriate floating point type.
Parameters
----------
dtype : float, dtype, or instance
Kind of floating point data-type about which to get information.
See Also
--------
MachAr : The implementation of the tests that produce this information.
iinfo : The equivalent for integer data types.
Notes
-----
For developers of NumPy: do not instantiate this at the module level.
The initial calculation of these parameters is expensive and negatively
impacts import times. These objects are cached, so calling ``finfo()``
repeatedly inside your functions is not a problem.
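    Examples
    --------
    A short sketch (the values shown are the usual IEEE-754 double
    parameters; exact reprs may differ across platforms):
    >>> np.finfo(np.float64).eps   # doctest: +SKIP
    2.220446049250313e-16
    >>> np.finfo(np.float64).max   # doctest: +SKIP
    1.7976931348623157e+308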
"""
_finfo_cache = {}
def __new__(cls, dtype):
try:
dtype = numeric.dtype(dtype)
except TypeError:
# In case a float instance was given
dtype = numeric.dtype(type(dtype))
obj = cls._finfo_cache.get(dtype, None)
if obj is not None:
return obj
dtypes = [dtype]
newdtype = numeric.obj2sctype(dtype)
if newdtype is not dtype:
dtypes.append(newdtype)
dtype = newdtype
if not issubclass(dtype, numeric.inexact):
raise ValueError("data type %r not inexact" % (dtype))
obj = cls._finfo_cache.get(dtype, None)
if obj is not None:
return obj
if not issubclass(dtype, numeric.floating):
newdtype = _convert_to_float[dtype]
if newdtype is not dtype:
dtypes.append(newdtype)
dtype = newdtype
obj = cls._finfo_cache.get(dtype, None)
if obj is not None:
return obj
obj = object.__new__(cls)._init(dtype)
for dt in dtypes:
cls._finfo_cache[dt] = obj
return obj
def _init(self, dtype):
self.dtype = numeric.dtype(dtype)
if dtype is ntypes.double:
itype = ntypes.int64
fmt = '%24.16e'
precname = 'double'
elif dtype is ntypes.single:
itype = ntypes.int32
fmt = '%15.7e'
precname = 'single'
elif dtype is ntypes.longdouble:
itype = ntypes.longlong
fmt = '%s'
precname = 'long double'
elif dtype is ntypes.half:
itype = ntypes.int16
fmt = '%12.5e'
precname = 'half'
else:
raise ValueError(repr(dtype))
machar = MachAr(lambda v:array([v], dtype),
lambda v:_frz(v.astype(itype))[0],
lambda v:array(_frz(v)[0], dtype),
lambda v: fmt % array(_frz(v)[0], dtype),
'numpy %s precision floating point number' % precname)
for word in ['precision', 'iexp',
'maxexp', 'minexp', 'negep',
'machep']:
setattr(self, word, getattr(machar, word))
for word in ['tiny', 'resolution', 'epsneg']:
setattr(self, word, getattr(machar, word).flat[0])
self.bits = self.dtype.itemsize * 8
self.max = machar.huge.flat[0]
self.min = -self.max
self.eps = machar.eps.flat[0]
self.nexp = machar.iexp
self.nmant = machar.it
self.machar = machar
self._str_tiny = machar._str_xmin.strip()
self._str_max = machar._str_xmax.strip()
self._str_epsneg = machar._str_epsneg.strip()
self._str_eps = machar._str_eps.strip()
self._str_resolution = machar._str_resolution.strip()
return self
def __str__(self):
fmt = (
'Machine parameters for %(dtype)s\n'
'---------------------------------------------------------------\n'
'precision = %(precision)3s resolution = %(_str_resolution)s\n'
'machep = %(machep)6s eps = %(_str_eps)s\n'
'negep = %(negep)6s epsneg = %(_str_epsneg)s\n'
'minexp = %(minexp)6s tiny = %(_str_tiny)s\n'
'maxexp = %(maxexp)6s max = %(_str_max)s\n'
'nexp = %(nexp)6s min = -max\n'
'---------------------------------------------------------------\n'
)
return fmt % self.__dict__
def __repr__(self):
c = self.__class__.__name__
d = self.__dict__.copy()
d['klass'] = c
return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s,"
" max=%(_str_max)s, dtype=%(dtype)s)") % d)
class iinfo(object):
"""
iinfo(type)
Machine limits for integer types.
Attributes
----------
bits : int
The number of bits occupied by the type.
min : int
The smallest integer expressible by the type.
max : int
The largest integer expressible by the type.
Parameters
----------
int_type : integer type, dtype, or instance
The kind of integer data type to get information about.
See Also
--------
finfo : The equivalent for floating point data types.
Examples
--------
With types:
>>> ii16 = np.iinfo(np.int16)
>>> ii16.min
-32768
>>> ii16.max
32767
>>> ii32 = np.iinfo(np.int32)
>>> ii32.min
-2147483648
>>> ii32.max
2147483647
With instances:
>>> ii32 = np.iinfo(np.int32(10))
>>> ii32.min
-2147483648
>>> ii32.max
2147483647
"""
_min_vals = {}
_max_vals = {}
def __init__(self, int_type):
try:
self.dtype = numeric.dtype(int_type)
except TypeError:
self.dtype = numeric.dtype(type(int_type))
self.kind = self.dtype.kind
self.bits = self.dtype.itemsize * 8
self.key = "%s%d" % (self.kind, self.bits)
if self.kind not in 'iu':
raise ValueError("Invalid integer data type.")
def min(self):
"""Minimum value of given dtype."""
if self.kind == 'u':
return 0
else:
try:
val = iinfo._min_vals[self.key]
except KeyError:
val = int(-(1 << (self.bits-1)))
iinfo._min_vals[self.key] = val
return val
min = property(min)
def max(self):
"""Maximum value of given dtype."""
try:
val = iinfo._max_vals[self.key]
except KeyError:
if self.kind == 'u':
val = int((1 << self.bits) - 1)
else:
val = int((1 << (self.bits-1)) - 1)
iinfo._max_vals[self.key] = val
return val
max = property(max)
def __str__(self):
"""String representation."""
fmt = (
'Machine parameters for %(dtype)s\n'
'---------------------------------------------------------------\n'
'min = %(min)s\n'
'max = %(max)s\n'
'---------------------------------------------------------------\n'
)
return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max}
def __repr__(self):
return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__,
self.min, self.max, self.dtype)
if __name__ == '__main__':
f = finfo(ntypes.single)
print('single epsilon:', f.eps)
print('single tiny:', f.tiny)
f = finfo(ntypes.float)
print('float epsilon:', f.eps)
print('float tiny:', f.tiny)
f = finfo(ntypes.longfloat)
print('longfloat epsilon:', f.eps)
print('longfloat tiny:', f.tiny)
|
MoneyPush/AlgorithmicTradingApiPython
|
refs/heads/master
|
setup.py
|
2
|
from setuptools import setup
setup(name='BigDataTradeAPIPy',
version='0.0.2',
description='BigDataTrade API in python',
url='https://github.com/bigdatatrade/AlgorithmicTradingApiPython',
author='Nacass Tommy',
author_email='tommy.nacass@gmail.com',
license='MIT',
packages=['bigdatatrade'],
zip_safe=False)
|
mariomosca/damnvid
|
refs/heads/master
|
dLog.py
|
12
|
# -*- coding: utf-8 -*-
import os, sys
import time
import traceback
from dCore import *
class DamnLog:
def __init__(self, logpath=None, stderr=True, flush=False, handleerrors=True, overrides={}):
DamnLog.instance = self
self.time = 0
self.streams = []
self.autoflush = flush
        self.overrides = dict(overrides)  # honor the argument; copy so the mutable default isn't shared
if logpath is not None:
try:
if not os.path.exists(os.path.dirname(logpath)):
os.makedirs(os.path.dirname(logpath))
f = DamnOpenFile(logpath, 'wb')
self.streams.append(f)
f.write((self.getPrefix() + u'Log opened.').encode('utf8'))
except:
try:
print 'Warning: Couldn\'t open log file!'
traceback.print_exc()
except:
pass
if stderr:
self.streams.append(sys.stdout)
if handleerrors:
try:
sys.excepthook = self.logException
except:
self.log('!! Cannot override excepthook. This looks bad.')
def getPrefix(self):
t = int(time.time())
if self.time != t:
self.time = t
return u'[' + DamnUnicode(time.strftime('%H:%M:%S')) + u'] '
return u''
def write(self, message):
message = u'\r\n' + (self.getPrefix() + DamnUnicode(message.strip())).strip()
for s in self.streams:
try:
print >> s, message.encode('utf8'),
except:
try:
                    print 'Could not print to stream', s, 'message:', message.strip()
except:
pass
if self.autoflush:
self.flush()
def log(self, *args):
import dCore
s = []
for i in args:
i = dCore.DamnUnicode(i)
for k in self.overrides.iterkeys():
i = i.replace(k, self.overrides[k])
s.append(i)
return self.write(u' '.join(s))
def logException(self, typ, value, tb):
import traceback
import dCore
import dLog
try:
info = traceback.format_exception(typ, value, tb)
e = []
for i in info:
e.append(dCore.DamnUnicode(i).strip())
self.log('!!',u'\n'.join(e))
except:
try:
self.log('!! Error while logging exception. Something is very wrong.')
except:
pass # Something is very, very wrong.
def flush(self):
for s in self.streams:
try:
s.flush()
except:
pass
try:
os.fsync(s)
except:
pass
def close(self):
self.log('Closing log.')
for s in self.streams:
if s != sys.stderr:
try:
s.close()
except:
pass
    def addOverride(self, target, replacement=u''):
        self.overrides[DamnUnicode(target)] = DamnUnicode(replacement)
def Damnlog(*args):
if DamnLog.__dict__.has_key('instance'):
return DamnLog.instance.log(*args)
return None
def DamnlogException(*args):
if DamnLog.__dict__.has_key('instance'):
return DamnLog.instance.logException(*args)
return None
def DamnlogOverride(target, replacement=u''):
DamnLog.instance.addOverride(target, replacement)
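# Usage sketch (hedged; the path and messages are hypothetical):
#
#   log = DamnLog(logpath=None, stderr=True)  # log to stdout only
#   Damnlog('Starting up; version:', '1.0')   # module-level convenience logger
#   DamnlogOverride(u'/home/user', u'~')      # scrub home paths from the output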
|
taxpon/sverchok
|
refs/heads/master
|
old_nodes/eval_knieval.py
|
4
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import os
import re
import ast
import traceback
from ast import literal_eval
import bpy
from mathutils import Vector, Matrix, Euler, Quaternion, Color
from bpy.props import FloatProperty, StringProperty, BoolProperty, EnumProperty
from sverchok.node_tree import SverchCustomTreeNode, StringsSocket, VerticesSocket, MatrixSocket
from sverchok.data_structure import updateNode, SvGetSocketAnyType, SvSetSocketAnyType, Matrix_generate
'''
- SET: `path.to.prop`
- GET: `path.to.prop`
- DO:  `eval_text(a, b, [True])`
       `read_text(a, [True])`
       `do_function(a) with x`
'''
def read_text(fp, update=True):
"""
if args has separators then look on local disk else in .blend.
update writes the changes to the textfile in blender
"""
texts = bpy.data.texts
internal_file = False
text_name = fp
if not (os.sep in fp) and (fp in texts):
# print(fp)
# file in blend, but linked outside
# print('internal file!')
internal_file = True
fp = texts[text_name].filepath
fp = bpy.path.abspath(fp)
with open(fp) as new_text:
text_body = ''.join(new_text.readlines())
if internal_file and update:
texts[text_name].from_string(text_body)
return literal_eval(text_body)
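# Usage sketch (hypothetical names): the argument may be an internal text
# datablock (optionally re-synced from its linked file) or a path on disk;
# either way the content must be a Python literal:
#
#   data = read_text('params.txt')       # datablock inside the .blend
#   data = read_text('/tmp/params.txt')  # contains os.sep, so read from disk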
# def eval_text(node, function_text, out_text, update=True):
def eval_text(function_text, out_text, update=True):
"""
eval_text(function_text, out_text, update=True)
: function_text
a reference to a file inside blender. This text should be initiated outside
of blender or made external by saving and loading. The content of this file is
what writes to the out_text.
: out_text
the internal text file to read from. The content of which might be changing on
each update.
: update
this parameter isn't very useful at the moment, but keep it to True if you
want to update the content of the internal text file. Else only the external
file will be read.
"""
texts = bpy.data.texts
text = texts[function_text]
if update:
fp = text.filepath
fp = bpy.path.abspath(fp)
with open(fp) as new_text:
text_body = ''.join(new_text.readlines())
text.from_string(text_body)
# at this point text is updated and can be executed.
# could be cached in node.
text = texts[function_text]
exec(text.as_string())
# if function_text execed OK, then it has written to texts[out_text]
# This file out_text should exist.
out_data = None
if out_text in texts:
written_data = texts[out_text].as_string()
out_data = literal_eval(written_data)
return out_data
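# Usage sketch (hypothetical datablock names): 'gen_locs.py' is executed and
# is expected to write a Python literal into the text datablock 'locs_out',
# which is then parsed and returned:
#
#   locs = eval_text('gen_locs.py', 'locs_out')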
def get_params(prop, pat):
"""function to convert the string representation to literal arguments ready for passing"""
regex = re.compile(pat)
return literal_eval(regex.findall(prop)[0])
def process_macro(node, macro, prop_to_eval):
    params = get_params(prop_to_eval, r'\(.*?\)')
tvar = None
fn = None
if macro == 'eval_text':
if 2 <= len(params) <= 3:
fn = eval_text
else:
if 1 <= len(params) <= 2:
fn = read_text
if not fn:
return
# do this once, if success skip the try on the next update
if not node.eval_success:
try:
tvar = fn(*params)
except Exception as err:
if node.full_traceback:
print(traceback.format_exc())
else:
fail_msg = "nope, {type} with ({params}) failed - try full traceback"
print(fail_msg.format(type=macro, params=str(params)))
node.previous_eval_str = ""
finally:
node.eval_success = False if (tvar is None) else True
print('success?', node.eval_success)
return tvar
else:
print('running {macro} unevalled'.format(macro=macro))
return fn(*params)
def process_prop_string(node, prop_to_eval):
"""
First it is evaluated in a try/except scenario, and if that went OK then the next update
is without try/except.
example eval strings might be:
objs['Cube'].location
objs['Cube'].matrix_world
    I have expressly not implemented a wide range of features; imo that's what Scripted Node
    is best at.
"""
tvar = None
c = bpy.context
scene = c.scene
data = bpy.data
objs = data.objects
mats = data.materials
meshes = data.meshes
texts = data.texts
# yes there's a massive assumption here too.
if not node.eval_success:
try:
tvar = eval(prop_to_eval)
except Exception as err:
if node.full_traceback:
print(traceback.format_exc())
else:
print("nope, crash n burn hard - try full traceback")
node.previous_eval_str = ""
finally:
print('evalled', tvar)
node.eval_success = False if (tvar is None) else True
else:
tvar = eval(prop_to_eval)
return tvar
def process_input_to_bpy(node, tvar, stype):
"""
this is one-way, node is the reference to the current eval node. tvar is the current
variable being introduced into bpy. First it is executed in a try/except scenario,
and if that went OK then the next update is without try/except.
"""
c = bpy.context
scene = c.scene
data = bpy.data
objs = data.objects
mats = data.materials
meshes = data.meshes
if stype == 'MatrixSocket':
tvar = str(tvar[:])
fxed = (node.eval_str.strip() + " = {x}").format(x=tvar)
# yes there's a massive assumption here.
if not node.eval_success:
success = False
try:
exec(fxed)
success = True
node.previous_eval_str = node.eval_str
except Exception as err:
if node.full_traceback:
print(traceback.format_exc())
else:
print("nope, crash n burn - try full traceback")
success = False
node.previous_eval_str = ""
finally:
node.eval_success = success
else:
exec(fxed)
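# For example (hedged sketch): with node.eval_str = "objs['Cube'].location"
# and an incoming tvar of (1.0, 2.0, 3.0), the executed statement becomes:
#
#   objs['Cube'].location = (1.0, 2.0, 3.0)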
def process_input_dofunction(node, x):
"""
This function aims to facilitate the repeated execution of a python file
located inside Blender. Similar to Scripted Node but with the restriction
that it has one input by design. Realistically the input can be an array,
and therefore nested with a collection of variables.
The python file to exec shall be specified in the eval string like so:
`do_function('file_name.py') with x`
Here x is the value of the input socket, this will automatically be in the
scope of the function when EK calls it. First it is executed in a
try/except scenario, and if that went OK then the next update is without
try/except.
The content of file_name.py can be anything that executes, function or
a flat file. The following convenience variables will be present.
"""
c = bpy.context
scene = c.scene
data = bpy.data
objs = data.objects
mats = data.materials
meshes = data.meshes
texts = data.texts
# extract filename
# if filename not in .blend return and throw error
    function_file = get_params(node.eval_str, r'\(.*?\)')
if not (function_file in texts):
print('function_file, not found -- check spelling')
node.eval_success = False
node.previous_eval_str = ""
return
text = texts[function_file]
raw_text_str = text.as_string()
# yes there's a massive assumption here.
if not node.eval_success:
success = False
try:
exec(raw_text_str)
success = True
node.previous_eval_str = node.eval_str
except Exception as err:
if node.full_traceback:
print(traceback.format_exc())
else:
print("nope, crash n burn - try full traceback")
success = False
node.previous_eval_str = ""
finally:
node.eval_success = success
else:
exec(raw_text_str)
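# A hypothetical 'file_name.py' body for the docstring's example could be:
#
#   objs['Cube'].location.x = x   # x is the socket value, injected into scope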
def wrap_output_data(tvar):
if isinstance(tvar, Vector):
data = [[tvar[:]]]
elif isinstance(tvar, Matrix):
data = [[r[:] for r in tvar[:]]]
elif isinstance(tvar, (Euler, Quaternion)):
tvar = tvar.to_matrix().to_4x4()
data = [[r[:] for r in tvar[:]]]
elif isinstance(tvar, list):
data = [tvar]
else:
data = tvar
return data
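# For example (hedged sketch): wrap_output_data(Vector((1, 2, 3))) returns
# [[(1.0, 2.0, 3.0)]], a 4x4 Matrix becomes [[row0, row1, row2, row3]] with
# each row a 4-tuple, and a plain float passes through unchanged.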
class EvalKnievalNode(bpy.types.Node, SverchCustomTreeNode):
''' Eval Knieval Node '''
bl_idname = 'EvalKnievalNode'
bl_label = 'Eval Knieval Node'
bl_icon = 'OUTLINER_OB_EMPTY'
x = FloatProperty(
name='x', description='x variable', default=0.0, precision=5, update=updateNode)
eval_str = StringProperty(update=updateNode)
previous_eval_str = StringProperty()
mode = StringProperty(default="input")
previous_mode = StringProperty(default="input")
eval_success = BoolProperty(default=False)
# not hooked up yet.
eval_knieval_mode = BoolProperty(
default=True,
description="when unticked, try/except is done only once")
# hyper: because it's above mode.
current_hyper = StringProperty(default="SET")
hyper_options = [
("DO", "Do", "", 0),
("SET", "Set", "", 1),
("GET", "Get", "", 2)
]
def mode_change(self, context):
if not (self.selected_hyper == self.current_hyper):
self.label = self.selected_hyper
self.update_outputs_and_inputs()
self.current_hyper = self.selected_hyper
updateNode(self, context)
selected_hyper = EnumProperty(
items=hyper_options,
name="Behavior",
description="Choices of behavior",
default="SET",
update=mode_change)
full_traceback = BoolProperty()
def init(self, context):
self.inputs.new('StringsSocket', "x").prop_name = 'x'
self.width = 400
def draw_buttons(self, context, layout):
if self.selected_hyper in {'DO', 'SET'}:
row = layout.row()
# row.separator()
row.label('')
row = layout.row()
row.prop(self, 'selected_hyper', expand=True)
row = layout.row()
row.prop(self, 'eval_str', text='')
def draw_buttons_ext(self, context, layout):
row = layout.row()
# row.prop(self, 'eval_knieval_mode', text='eval knieval mode')
row.prop(self, 'full_traceback', text='full traceback')
def update_outputs_and_inputs(self):
self.mode = {
'SET': 'input',
'GET': 'output',
'DO': 'input'
}.get(self.selected_hyper, None)
if not (self.mode == self.previous_mode):
self.set_sockets()
self.previous_mode = self.mode
self.eval_success = False
def update(self):
"""
Update behaves like the conductor, it detects the modes and sends flow control
to functions that know how to deal with socket data consistent with those modes.
It also avoids extra calculation by figuring out if input/output critera are
met before anything is processed. It returns early if it can.
"""
inputs = self.inputs
outputs = self.outputs
if self.mode == "input" and len(inputs) == 0:
return
elif self.mode == "output" and len(outputs) == 0:
return
if len(self.eval_str) <= 4:
return
if not (self.eval_str == self.previous_eval_str):
self.eval_success = False
{
"input": self.input_mode,
"output": self.output_mode
}.get(self.mode, lambda: None)()
self.set_ui_color()
def input_mode(self):
inputs = self.inputs
if (len(inputs) == 0) or (not inputs[0].links):
print('has no link!')
return
# then morph default socket type to whatever we plug into it.
from_socket = inputs[0].links[0].from_socket
incoming_socket_type = type(from_socket)
stype = {
VerticesSocket: 'VerticesSocket',
MatrixSocket: 'MatrixSocket',
StringsSocket: 'StringsSocket'
}.get(incoming_socket_type, None)
# print(incoming_socket_type, from_socket, stype)
if not stype:
print('unidentified flying input')
return
# if the current self.input socket is different to incoming
if not (stype == self.inputs[0].bl_idname):
self.morph_input_socket_type(stype)
# only one nesting level supported, for types other than matrix.
# else x gets complicated. x is complex already, this forces
# simplicity
tvar = None
if stype == 'MatrixSocket':
prop = SvGetSocketAnyType(self, inputs[0])
tvar = Matrix_generate(prop)[0]
# print('---repr-\n', repr(tvar))
else:
tvar = SvGetSocketAnyType(self, inputs[0])[0][0]
# input can still be empty or []
if not tvar:
return
if self.eval_str.endswith("with x"):
process_input_dofunction(self, tvar)
else:
process_input_to_bpy(self, tvar, stype)
def output_mode(self):
outputs = self.outputs
if (len(outputs) == 0) or (not outputs[0].links):
print('has no link!')
return
prop_to_eval = self.eval_str.strip()
macro = prop_to_eval.split("(")[0]
tvar = None
if macro in ['eval_text', 'read_text']:
tvar = process_macro(self, macro, prop_to_eval)
else:
tvar = process_prop_string(self, prop_to_eval)
# explicit None must be caught. not 0 or False
if tvar is None:
return
if not (self.previous_eval_str == self.eval_str):
print("tvar: ", tvar)
self.morph_output_socket_type(tvar)
# finally we can set this.
data = wrap_output_data(tvar)
SvSetSocketAnyType(self, 0, data)
self.previous_eval_str = self.eval_str
def set_sockets(self):
"""
Triggered by mode changes between [input, output] this removes the socket
from one side and adds a socket to the other side. This way you have something
to plug into. When you connect a node to a socket, the socket can then be
automagically morphed to match the socket-type. (morphing is however done in the
morph functions)
"""
a, b = {
'input': (self.inputs, self.outputs),
'output': (self.outputs, self.inputs)
}[self.mode]
b.clear()
a.new('StringsSocket', 'x')
if self.mode == 'input':
a[0].prop_name = 'x'
def set_ui_color(self):
self.use_custom_color = True
self.color = (1.0, 1.0, 1.0) if self.eval_success else (0.98, 0.6, 0.6)
def morph_output_socket_type(self, tvar):
"""
Set the output according to the data types
the body of this if-statement is done only infrequently,
when the eval string is not the same as the last eval.
"""
outputs = self.outputs
output_socket_type = 'StringsSocket'
if isinstance(tvar, Vector):
output_socket_type = 'VerticesSocket'
elif isinstance(tvar, (list, tuple)):
output_socket_type = 'VerticesSocket'
elif isinstance(tvar, (Matrix, Euler, Quaternion)):
output_socket_type = 'MatrixSocket'
elif isinstance(tvar, (int, float)):
output_socket_type = 'StringsSocket'
links = outputs[0].links
needs_reconnect = False
if links and links[0]:
needs_reconnect = True
link = links[0]
node_to = link.to_node
socket_to = link.to_socket
# needs clever reconnect? maybe not.
if outputs[0].bl_idname != output_socket_type:
outputs.clear()
outputs.new(output_socket_type, 'x')
if needs_reconnect:
ng = self.id_data
ng.links.new(outputs[0], socket_to)
def morph_input_socket_type(self, new_type):
"""
Recasts current input socket type to conform to incoming type
Preserves the connection.
"""
# where is the data coming from?
inputs = self.inputs
link = inputs[0].links[0]
node_from = link.from_node
socket_from = link.from_socket
# flatten and reinstate
inputs.clear()
inputs.new(new_type, 'x')
# reconnect
ng = self.id_data
ng.links.new(socket_from, inputs[0])
def update_socket(self, context):
self.update()
def register():
bpy.utils.register_class(EvalKnievalNode)
def unregister():
bpy.utils.unregister_class(EvalKnievalNode)
|
webmasteraxe/watchman
|
refs/heads/master
|
tests/integration/touch.py
|
10
|
# Portable simple implementation of `touch`
import os
import sys
import errno
fname = sys.argv[1]
try:
os.utime(fname, None)
except OSError as e:
if e.errno == errno.ENOENT:
with open(fname, 'a'):
os.utime(fname, None)
else:
raise
|
JamesRaynor67/mptcp_with_machine_learning
|
refs/heads/master
|
machineLearning/ver_0.10_q-learning/rl_server_ver_0.10(q-table).py
|
1
|
import socket
import pandas
from time import sleep
from rl_socket import Interacter_socket
from RL_core import QLearningTable
from RL_core import extract_observation
from RL_core import action_translator
from RL_core import calculate_reward
from RL_core import apply_action
from shutil import copyfile
def IsInt(s):
# A naive method, but enough here
if "." in s:
return False
else:
return True
class DataRecorder():
def __init__(self):
self.next_seq_num = 0
self.subflow_data = {}
self.action = []
def add_one_record(self, str_data):
# name$value#name$value...
pair_list = str_data.split("$")
one_row_of_train_data = {}
for pair in pair_list:
if len(pair) > 3: # this ensures the string (pair) is not empty or string with single '$'
name_val_list = pair.split("#")
# print "Hong Jiaming: 1 ", pair, len(name_val_list), name_val_list[0], name_val_list[1]
if IsInt(name_val_list[1]):
one_row_of_train_data[name_val_list[0]] = int(name_val_list[1])
else:
one_row_of_train_data[name_val_list[0]] = float(name_val_list[1])
# ensure this transmission is right and complete
# neighbour TCP segments must not combined into one
assert one_row_of_train_data["size"] == len(one_row_of_train_data)
assert one_row_of_train_data["ssn"] == self.next_seq_num
self.subflow_data[self.next_seq_num] = one_row_of_train_data
self.next_seq_num += 1
def get_subflow_data_dic(self):
return self.subflow_data
def get_latest_subflow_data(self):
return self.subflow_data[self.next_seq_num-1]
def add_pair_to_last_record(self, name, value):
self.subflow_data[self.next_seq_num-1][name] = value
def print_all_subflow_data(self):
print "dic size: ", len(self.subflow_data)
for ssn, data in self.subflow_data.iteritems():
for k, v in data.iteritems():
print "key: ", k, "value: ", v
def print_latest_subflow_data(self):
latest_data = self.subflow_data[self.next_seq_num-1]
for k, v in latest_data.iteritems():
print "key: ", k, "value: ", v
if __name__ == "__main__":
episode_count = 0
RL = QLearningTable(actions=["use subflow 0", "use subflow 1"])
while episode_count < 1000:
interacter_socket = Interacter_socket(host = '', port = 12345)
dataRecorder = DataRecorder()
interacter_socket.listen()
recv_str, this_batch_done = interacter_socket.recv()
dataRecorder.add_one_record(recv_str)
print 'iter: ', episode_count
f = open("/home/hong/workspace/mptcp/ns3/mptcp_output/calculate_reward", 'w')
f.write("time,reward\n")
while True:
observation_before_action = extract_observation(dataRecorder)
# print observation_before_action
action = RL.choose_action(observation_before_action)
# print action
apply_action(interacter_socket, dataRecorder, action)
recv_str, this_batch_done = interacter_socket.recv() # get new observation and reward
if this_batch_done is True:
break
dataRecorder.add_one_record(recv_str)
observation_after_action = extract_observation(dataRecorder)
reward = calculate_reward(dataRecorder)
f.write(str(dataRecorder.get_latest_subflow_data()["time"]) + ',' + str(reward) + '\n')
RL.learn(observation_before_action, action, reward, observation_after_action) # RL learning
observation_before_action = observation_after_action
interacter_socket.close()
interacter_socket = None
f.close()
RL.q_table.to_csv("/home/hong/workspace/mptcp/ns3/mptcp_output/q_table")
copyfile("/home/hong/workspace/mptcp/ns3/mptcp_output/calculate_reward", '/home/hong/workspace/mptcp/ns3/rl_training_data/' + str(episode_count) + '_calculate_reward')
copyfile("/home/hong/workspace/mptcp/ns3/mptcp_output/mptcp_client", '/home/hong/workspace/mptcp/ns3/rl_training_data/' + str(episode_count) + '_mptcp_client')
copyfile("/home/hong/workspace/mptcp/ns3/mptcp_output/mptcp_drops", '/home/hong/workspace/mptcp/ns3/rl_training_data/' + str(episode_count) + '_mptcp_drops')
copyfile("/home/hong/workspace/mptcp/ns3/mptcp_output/mptcp_server", '/home/hong/workspace/mptcp/ns3/rl_training_data/' + str(episode_count) + '_mptcp_server')
copyfile("/home/hong/workspace/mptcp/ns3/mptcp_output/mptcp_server_cWnd", '/home/hong/workspace/mptcp/ns3/rl_training_data/' + str(episode_count) + '_mptcp_server_cWnd')
# print "sleep 30 seconds from now"
# sleep(30)
episode_count += 1
|
draios/python-sdc-client
|
refs/heads/master
|
specs/secure/policy_v1_spec.py
|
1
|
import json
import os
import random
from expects import expect
from mamba import before, description, after, it
from sdcclient import SdSecureClientV1
from specs import be_successful_api_call
_POLICY_NAME = "Test - Launch Suspicious Network Tool on Host"
_POLICY_DESCRIPTION = "Detect network tools launched on the host"
_POLICY_RULES_REGEX = "Launch Suspicious Network Tool on Host"
_POLICY_ACTIONS = [
{
"type": "POLICY_ACTION_STOP",
"msg": ""
},
{
"type": "POLICY_ACTION_PAUSE",
"msg": ""
},
{
"type": "POLICY_ACTION_CAPTURE",
"beforeEventNs": 5000000000,
"afterEventNs": 18000000000,
"isLimitedToContainer": True
}
]
def policy_json():
return """\
{
"name": "%s",
"description": "%s",
"notificationChannelIds": [],
"severity": 0,
"hostScope": true,
"enabled": true,
"actions": %s,
"falcoConfiguration": {
"fields": [],
"ruleNameRegEx": "%s",
"onDefault": "DEFAULT_MATCH_EFFECT_NEXT"
},
"policyEventsCount": 0,
"isManual": true,
"isBuiltin": true,
"containerScope": true,
"modifiedOn": 1597646118000,
"createdOn": 1597646118000
}
""" % (_POLICY_NAME, _POLICY_DESCRIPTION, json.dumps(_POLICY_ACTIONS), _POLICY_RULES_REGEX)
with description("Policies v1") as self:
with before.all:
self.clientV1 = SdSecureClientV1(sdc_url=os.getenv("SDC_SECURE_URL", "https://secure.sysdig.com"),
token=os.getenv("SDC_SECURE_TOKEN"))
with after.each:
self.cleanup_policies()
def cleanup_policies(self):
_, res = self.clientV1.list_policies()
for policy in res['policies']:
if str(policy["name"]).startswith("Test - "):
ok, res = self.clientV1.delete_policy_id(policy["id"])
expect((ok, res)).to(be_successful_api_call)
with it("is able to list all existing policies"):
ok, res = self.clientV1.list_policies()
expect((ok, res)).to(be_successful_api_call)
with it("is able to list all policies priorities"):
ok, res = self.clientV1.get_policy_priorities()
expect((ok, res)).to(be_successful_api_call)
with it("is able to change the evaluation order of policies"):
ok, res = self.clientV1.get_policy_priorities()
random.shuffle(res['priorities']['policyIds'])
ok, res = self.clientV1.set_policy_priorities(json.dumps(res))
expect((ok, res)).to(be_successful_api_call)
with it("is able to add a policy from JSON"):
call = self.clientV1.add_policy(policy_json())
expect(call).to(be_successful_api_call)
with it("is able to get a policy by id"):
ok, res = self.clientV1.list_policies()
id = res['policies'][0]['id']
call = self.clientV1.get_policy_id(id)
expect(call).to(be_successful_api_call)
with it("is able to get a policy by name"):
ok, res = self.clientV1.list_policies()
name = res['policies'][0]['name']
call = self.clientV1.get_policy(name)
expect(call).to(be_successful_api_call)
with it("is able to update a policy from JSON"):
ok, res = self.clientV1.list_policies()
policy_json = res['policies'][0]
policy_json['description'] = "Updated description"
call = self.clientV1.update_policy(json.dumps(policy_json))
expect(call).to(be_successful_api_call)
with it("is able to delete a single policy by id"):
ok, res = self.clientV1.list_policies()
ok, res = self.clientV1.delete_policy_id(res['policies'][0]['id'])
expect((ok, res)).to(be_successful_api_call)
with it("is able to delete a single policy by name"):
ok, res = self.clientV1.list_policies()
ok, res = self.clientV1.delete_policy_name(res['policies'][1]['name'])
expect((ok, res)).to(be_successful_api_call)
with it("is able to delete all policies at once"):
ok, res = self.clientV1.delete_all_policies()
expect((ok, res)).to(be_successful_api_call)
with it("is able to create the default policies"):
ok, res = self.clientV1.create_default_policies()
expect((ok, res)).to(be_successful_api_call)
|
ChromiumWebApps/chromium
|
refs/heads/master
|
build/android/adb_logcat_printer.py
|
27
|
#!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Shutdown adb_logcat_monitor and print accumulated logs.
To test, call './adb_logcat_printer.py <base_dir>' where
<base_dir> contains 'adb logcat -v threadtime' files named as
logcat_<deviceID>_<sequenceNum>
The script will print the files to out, and will combine multiple
logcats from a single device if there is overlap.
Additionally, if a <base_dir>/LOGCAT_MONITOR_PID exists, the script
will attempt to terminate the contained PID by sending a SIGTERM and
monitoring for the deletion of the aforementioned file.
"""
import cStringIO
import logging
import optparse
import os
import re
import signal
import sys
import time
# Set this to debug for more verbose output
LOG_LEVEL = logging.INFO
def CombineLogFiles(list_of_lists, logger):
"""Splices together multiple logcats from the same device.
Args:
list_of_lists: list of pairs (filename, list of timestamped lines)
logger: handler to log events
Returns:
list of lines with duplicates removed
"""
cur_device_log = ['']
for cur_file, cur_file_lines in list_of_lists:
# Ignore files with just the logcat header
if len(cur_file_lines) < 2:
continue
common_index = 0
# Skip this step if list just has empty string
if len(cur_device_log) > 1:
try:
line = cur_device_log[-1]
# Used to make sure we only splice on a timestamped line
if re.match(r'^\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3} ', line):
common_index = cur_file_lines.index(line)
else:
logger.warning('splice error - no timestamp in "%s"?', line.strip())
except ValueError:
# The last line was valid but wasn't found in the next file
cur_device_log += ['***** POSSIBLE INCOMPLETE LOGCAT *****']
logger.info('Unable to splice %s. Incomplete logcat?', cur_file)
cur_device_log += ['*'*30 + ' %s' % cur_file]
cur_device_log.extend(cur_file_lines[common_index:])
return cur_device_log
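# For example (hedged): if the previous file ends with a timestamped line that
# also appears in the next file, the next file's lines are appended starting
# from that shared line, so the overlapping region is emitted only once.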
def FindLogFiles(base_dir):
"""Search a directory for logcat files.
Args:
base_dir: directory to search
Returns:
Mapping of device_id to a sorted list of file paths for a given device
"""
logcat_filter = re.compile(r'^logcat_(\w+)_(\d+)$')
# list of tuples (<device_id>, <seq num>, <full file path>)
filtered_list = []
for cur_file in os.listdir(base_dir):
matcher = logcat_filter.match(cur_file)
if matcher:
filtered_list += [(matcher.group(1), int(matcher.group(2)),
os.path.join(base_dir, cur_file))]
filtered_list.sort()
file_map = {}
for device_id, _, cur_file in filtered_list:
if device_id not in file_map:
file_map[device_id] = []
file_map[device_id] += [cur_file]
return file_map
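# For example (hedged): a directory holding logcat_AB324_0 and logcat_AB324_1
# yields {'AB324': [.../logcat_AB324_0, .../logcat_AB324_1]}, with each
# per-device list sorted by sequence number.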
def GetDeviceLogs(log_filenames, logger):
"""Read log files, combine and format.
Args:
log_filenames: mapping of device_id to sorted list of file paths
logger: logger handle for logging events
Returns:
list of formatted device logs, one for each device.
"""
device_logs = []
for device, device_files in log_filenames.iteritems():
logger.debug('%s: %s', device, str(device_files))
device_file_lines = []
for cur_file in device_files:
with open(cur_file) as f:
device_file_lines += [(cur_file, f.read().splitlines())]
combined_lines = CombineLogFiles(device_file_lines, logger)
# Prepend each line with a short unique ID so it's easy to see
# when the device changes. We don't use the start of the device
# ID because it can be the same among devices. Example lines:
# AB324: foo
# AB324: blah
device_logs += [('\n' + device[-5:] + ': ').join(combined_lines)]
return device_logs
def ShutdownLogcatMonitor(base_dir, logger):
"""Attempts to shutdown adb_logcat_monitor and blocks while waiting."""
try:
monitor_pid_path = os.path.join(base_dir, 'LOGCAT_MONITOR_PID')
with open(monitor_pid_path) as f:
monitor_pid = int(f.readline())
logger.info('Sending SIGTERM to %d', monitor_pid)
os.kill(monitor_pid, signal.SIGTERM)
i = 0
while True:
time.sleep(.2)
if not os.path.exists(monitor_pid_path):
return
if not os.path.exists('/proc/%d' % monitor_pid):
logger.warning('Monitor (pid %d) terminated uncleanly?', monitor_pid)
return
logger.info('Waiting for logcat process to terminate.')
i += 1
if i >= 10:
logger.warning('Monitor pid did not terminate. Continuing anyway.')
return
except (ValueError, IOError, OSError):
logger.exception('Error signaling logcat monitor - continuing')
def main(argv):
parser = optparse.OptionParser(usage='Usage: %prog [options] <log dir>')
parser.add_option('--output-path',
help='Output file path (if unspecified, prints to stdout)')
options, args = parser.parse_args(argv)
if len(args) != 1:
parser.error('Wrong number of unparsed args')
base_dir = args[0]
if options.output_path:
output_file = open(options.output_path, 'w')
else:
output_file = sys.stdout
log_stringio = cStringIO.StringIO()
logger = logging.getLogger('LogcatPrinter')
logger.setLevel(LOG_LEVEL)
sh = logging.StreamHandler(log_stringio)
sh.setFormatter(logging.Formatter('%(asctime)-2s %(levelname)-8s'
' %(message)s'))
logger.addHandler(sh)
try:
# Wait at least 5 seconds after base_dir is created before printing.
#
# The idea is that 'adb logcat > file' output consists of 2 phases:
# 1 Dump all the saved logs to the file
# 2 Stream log messages as they are generated
#
# We want to give enough time for phase 1 to complete. There's no
# good method to tell how long to wait, but it usually only takes a
    # second. On most bots, this code path won't occur at all, since the
    # adb_logcat_monitor.py command will have been spawned more than 5
    # seconds before this script is called.
try:
sleep_time = 5 - (time.time() - os.path.getctime(base_dir))
except OSError:
sleep_time = 5
if sleep_time > 0:
logger.warning('Monitor just started? Sleeping %.1fs', sleep_time)
time.sleep(sleep_time)
assert os.path.exists(base_dir), '%s does not exist' % base_dir
ShutdownLogcatMonitor(base_dir, logger)
separator = '\n' + '*' * 80 + '\n\n'
for log in GetDeviceLogs(FindLogFiles(base_dir), logger):
output_file.write(log)
output_file.write(separator)
with open(os.path.join(base_dir, 'eventlog')) as f:
output_file.write('\nLogcat Monitor Event Log\n')
output_file.write(f.read())
except:
logger.exception('Unexpected exception')
logger.info('Done.')
sh.flush()
output_file.write('\nLogcat Printer Event Log\n')
output_file.write(log_stringio.getvalue())
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
foglamp/FogLAMP
|
refs/heads/develop
|
python/foglamp/plugins/common/shim/south_shim.py
|
1
|
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
"""shim layer between Python and C++"""
import os
import importlib.util
import sys
import json
import logging
from foglamp.common import logger
from foglamp.common.common import _FOGLAMP_ROOT
from foglamp.services.core.api.plugins import common
_LOGGER = logger.setup(__name__, level=logging.WARN)
_plugin = None
_LOGGER.info("Loading shim layer for python plugin '{}' ".format(sys.argv[1]))
def _plugin_obj():
plugin = sys.argv[1]
plugin_type = "south"
plugin_module_path = "{}/python/foglamp/plugins/{}/{}".format(_FOGLAMP_ROOT, plugin_type, plugin)
    _plugin = common.load_python_plugin(plugin_module_path, plugin, plugin_type)
return _plugin
_plugin = _plugin_obj()
def plugin_info():
_LOGGER.info("plugin_info called")
handle = _plugin.plugin_info()
handle['config'] = json.dumps(handle['config'])
return handle
def plugin_init(config):
_LOGGER.info("plugin_init called")
handle = _plugin.plugin_init(json.loads(config))
# TODO: FOGL-1827 - Config item value must be respected as per type given
revised_handle = _revised_config_for_json_item(handle)
return revised_handle
def plugin_poll(handle):
reading = _plugin.plugin_poll(handle)
return reading
def plugin_reconfigure(handle, new_config):
_LOGGER.info("plugin_reconfigure")
new_handle = _plugin.plugin_reconfigure(handle, json.loads(new_config))
# TODO: FOGL-1827 - Config item value must be respected as per type given
revised_handle = _revised_config_for_json_item(new_handle)
return revised_handle
def plugin_shutdown(handle):
_LOGGER.info("plugin_shutdown")
return _plugin.plugin_shutdown(handle)
def plugin_start(handle):
_LOGGER.info("plugin_start")
return _plugin.plugin_start(handle)
def plugin_register_ingest(handle, callback, ingest_ref):
_LOGGER.info("plugin_register_ingest")
return _plugin.plugin_register_ingest(handle, callback, ingest_ref)
def _revised_config_for_json_item(config):
# South C server sends "config" argument as string in which all JSON type items' components,
# 'default' and 'value', gets converted to dict during json.loads(). Hence we need to restore
# them to str, which is the required format for configuration items.
revised_config_handle = {}
for k, v in config.items():
if isinstance(v, dict):
if 'type' in v and v['type'] == 'JSON':
if isinstance(v['default'], dict):
v['default'] = json.dumps(v['default'])
if isinstance(v['value'], dict):
v['value'] = json.dumps(v['value'])
revised_config_handle.update({k: v})
return revised_config_handle
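# Example (hedged sketch): a JSON-typed item whose components were decoded
# into dicts is serialized back to strings:
#
#   {'cfg': {'type': 'JSON', 'default': {'a': 1}, 'value': {'a': 2}}}
#   -> {'cfg': {'type': 'JSON', 'default': '{"a": 1}', 'value': '{"a": 2}'}}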
|
flacjacket/sympy
|
refs/heads/master
|
sympy/mpmath/tests/extratest_zeta.py
|
23
|
from mpmath import zetazero
from timeit import default_timer as clock
def test_zetazero():
    cases = [
(399999999, 156762524.6750591511),
(241389216, 97490234.2276711795),
(526196239, 202950727.691229534),
(542964976, 209039046.578535272),
(1048449112, 388858885.231056486),
(1048449113, 388858885.384337406),
(1048449114, 388858886.002285122),
(1048449115, 388858886.00239369),
(1048449116, 388858886.690745053)
]
for n, v in cases:
print(n, v)
t1 = clock()
ok = zetazero(n).ae(complex(0.5,v))
t2 = clock()
print("ok =", ok, ("(time = %s)" % round(t2-t1,3)))
print("Now computing two huge zeros (this may take hours)")
print("Computing zetazero(8637740722917)")
ok = zetazero(8637740722917).ae(complex(0.5,2124447368584.39296466152))
print("ok =", ok)
ok = zetazero(8637740722918).ae(complex(0.5,2124447368584.39298170604))
print("ok =", ok)
if __name__ == "__main__":
try:
import psyco
psyco.full()
except ImportError:
pass
test_zetazero()
|
hwheeler01/comp150
|
refs/heads/gh-pages
|
_site/examples/greet.py
|
2
|
"""Simple example with Entry objects.
Enter your name, click the mouse, and see greetings.
"""
from graphics import *
def main():
win = GraphWin("Greeting", 300, 300)
win.yUp()
instructions = Text(Point(win.getWidth()/2, 40),
"Enter your name.\nThen click the mouse.")
instructions.draw(win)
entry1 = Entry(Point(win.getWidth()/2, 200),10)
entry1.draw(win)
Text(Point(win.getWidth()/2, 230),'Name:').draw(win) # label for the Entry
win.getMouse() # To know the user is finished with the text.
name = entry1.getText()
greeting1 = 'Hello, ' + name + '!'
Text(Point(win.getWidth()/3, 150), greeting1).draw(win)
greeting2 = 'Bonjour, ' + name + '!'
Text(Point(2*win.getWidth()/3, 100), greeting2).draw(win)
win.promptClose(instructions)
main()
|
Rewardcoin/p2ppool-SGcoin
|
refs/heads/master
|
wstools/tests/test_wstools.py
|
308
|
#!/usr/bin/env python
############################################################################
# Joshua R. Boverhof, David W. Robertson, LBNL
# See LBNLCopyright for copyright notice!
###########################################################################
import unittest, tarfile, os, ConfigParser
import test_wsdl
SECTION = 'files'
CONFIG_FILE = 'config.txt'
def extractFiles(section, option):
config = ConfigParser.ConfigParser()
config.read(CONFIG_FILE)
archives = config.get(section, option)
archives = eval(archives)
for file in archives:
tar = tarfile.open(file)
        if not os.access(tar.getnames()[0], os.R_OK):
for i in tar.getnames():
tar.extract(i)
def makeTestSuite():
suite = unittest.TestSuite()
suite.addTest(test_wsdl.makeTestSuite("services_by_file"))
return suite
def main():
extractFiles(SECTION, 'archives')
unittest.main(defaultTest="makeTestSuite")
if __name__ == "__main__" : main()
|
FrankBian/kuma
|
refs/heads/master
|
vendor/packages/sqlalchemy/doc/build/testdocs.py
|
7
|
import sys
sys.path = ['../../lib', './lib/'] + sys.path
import os
import re
import doctest
import sqlalchemy.util as util
import sqlalchemy.log as salog
import logging
salog.default_enabled=True
rootlogger = logging.getLogger('sqlalchemy')
rootlogger.setLevel(logging.NOTSET)
class MyStream(object):
def write(self, string):
sys.stdout.write(string)
sys.stdout.flush()
def flush(self):
pass
handler = logging.StreamHandler(MyStream())
handler.setFormatter(logging.Formatter('%(message)s'))
rootlogger.addHandler(handler)
def teststring(s, name, globs=None, verbose=None, report=True,
optionflags=0, extraglobs=None, raise_on_error=False,
parser=doctest.DocTestParser()):
from doctest import DebugRunner, DocTestRunner, master
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
test = parser.get_doctest(s, globs, name, name, 0)
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return runner.failures, runner.tries
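# Minimal usage sketch of teststring() above (hypothetical doctest input):
#
#   failures, tries = teststring(">>> 1 + 1\n2\n", name="inline-example")
#
# This runs the doctest held in a plain string instead of a module or file.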
def replace_file(s, newfile):
    engine = r"'(sqlite|postgresql|mysql):///.*'"
    engine = re.compile(engine, re.MULTILINE)
    s, n = re.subn(engine, "'sqlite:///" + newfile + "'", s)
    if not n:
        raise ValueError("Couldn't find a suitable create_engine call to replace in the given string")
    return s
for filename in ('ormtutorial', 'sqlexpression'):
filename = '%s.rst' % filename
s = open(filename).read()
#s = replace_file(s, ':memory:')
s = re.sub(r'{(?:stop|sql|opensql)}', '', s)
teststring(s, filename)
|
derekjchow/models
|
refs/heads/master
|
research/delf/delf/python/feature_aggregation_extractor_test.py
|
2
|
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DELF feature aggregation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
from delf import aggregation_config_pb2
from delf import feature_aggregation_extractor
class FeatureAggregationTest(tf.test.TestCase):
def _CreateCodebook(self, checkpoint_path):
"""Creates codebook used in tests.
Args:
checkpoint_path: Directory where codebook is saved to.
"""
with tf.Graph().as_default() as g, self.session(graph=g) as sess:
codebook = tf.Variable(
[[0.5, 0.5], [0.0, 0.0], [1.0, 0.0], [-0.5, -0.5], [0.0, 1.0]],
name='clusters')
saver = tf.compat.v1.train.Saver([codebook])
sess.run(tf.compat.v1.global_variables_initializer())
saver.save(sess, checkpoint_path)
def setUp(self):
self._codebook_path = os.path.join(tf.compat.v1.test.get_temp_dir(),
'test_codebook')
self._CreateCodebook(self._codebook_path)
def testComputeNormalizedVladWorks(self):
# Construct inputs.
# 3 2-D features.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float)
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD
config.use_l2_normalization = True
config.codebook_path = self._codebook_path
config.num_assignments = 1
# Run tested function.
with tf.Graph().as_default() as g, self.session(graph=g) as sess:
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
sess, config)
vlad, extra_output = extractor.Extract(features)
# Define expected results.
exp_vlad = [
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.316228, 0.316228, 0.632456, 0.632456
]
exp_extra_output = -1
# Compare actual and expected results.
self.assertAllClose(vlad, exp_vlad)
self.assertAllEqual(extra_output, exp_extra_output)
def testComputeNormalizedVladWithBatchingWorks(self):
# Construct inputs.
# 3 2-D features.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float)
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD
config.use_l2_normalization = True
config.codebook_path = self._codebook_path
config.num_assignments = 1
config.feature_batch_size = 2
# Run tested function.
with tf.Graph().as_default() as g, self.session(graph=g) as sess:
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
sess, config)
vlad, extra_output = extractor.Extract(features)
# Define expected results.
exp_vlad = [
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.316228, 0.316228, 0.632456, 0.632456
]
exp_extra_output = -1
# Compare actual and expected results.
self.assertAllClose(vlad, exp_vlad)
self.assertAllEqual(extra_output, exp_extra_output)
def testComputeUnnormalizedVladWorks(self):
# Construct inputs.
# 3 2-D features.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float)
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD
config.use_l2_normalization = False
config.codebook_path = self._codebook_path
config.num_assignments = 1
# Run tested function.
with tf.Graph().as_default() as g, self.session(graph=g) as sess:
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
sess, config)
vlad, extra_output = extractor.Extract(features)
# Define expected results.
exp_vlad = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5, 0.5, 1.0, 1.0]
exp_extra_output = -1
# Compare actual and expected results.
self.assertAllEqual(vlad, exp_vlad)
self.assertAllEqual(extra_output, exp_extra_output)
def testComputeUnnormalizedVladMultipleAssignmentWorks(self):
# Construct inputs.
# 3 2-D features.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float)
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD
config.use_l2_normalization = False
config.codebook_path = self._codebook_path
config.num_assignments = 3
# Run tested function.
with tf.Graph().as_default() as g, self.session(graph=g) as sess:
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
sess, config)
vlad, extra_output = extractor.Extract(features)
# Define expected results.
exp_vlad = [1.0, 1.0, 0.0, 0.0, 0.0, 2.0, -0.5, 0.5, 0.0, 0.0]
exp_extra_output = -1
# Compare actual and expected results.
self.assertAllEqual(vlad, exp_vlad)
self.assertAllEqual(extra_output, exp_extra_output)
def testComputeVladEmptyFeaturesWorks(self):
# Construct inputs.
# Empty feature array.
features = np.array([[]])
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD
config.codebook_path = self._codebook_path
# Run tested function.
with tf.Graph().as_default() as g, self.session(graph=g) as sess:
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
sess, config)
vlad, extra_output = extractor.Extract(features)
# Define expected results.
exp_vlad = np.zeros([10], dtype=float)
exp_extra_output = -1
# Compare actual and expected results.
self.assertAllEqual(vlad, exp_vlad)
self.assertAllEqual(extra_output, exp_extra_output)
def testComputeUnnormalizedRvladWorks(self):
# Construct inputs.
# 4 2-D features: 3 in first region, 1 in second region.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]],
dtype=float)
num_features_per_region = np.array([3, 1])
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD
config.use_l2_normalization = False
config.codebook_path = self._codebook_path
config.num_assignments = 1
config.use_regional_aggregation = True
# Run tested function.
with tf.Graph().as_default() as g, self.session(graph=g) as sess:
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
sess, config)
rvlad, extra_output = extractor.Extract(features, num_features_per_region)
# Define expected results.
exp_rvlad = [
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.158114, 0.158114, 0.316228, 0.816228
]
exp_extra_output = -1
# Compare actual and expected results.
self.assertAllClose(rvlad, exp_rvlad)
self.assertAllEqual(extra_output, exp_extra_output)
def testComputeNormalizedRvladWorks(self):
# Construct inputs.
# 4 2-D features: 3 in first region, 1 in second region.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]],
dtype=float)
num_features_per_region = np.array([3, 1])
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD
config.use_l2_normalization = True
config.codebook_path = self._codebook_path
config.num_assignments = 1
config.use_regional_aggregation = True
# Run tested function.
with tf.Graph().as_default() as g, self.session(graph=g) as sess:
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
sess, config)
rvlad, extra_output = extractor.Extract(features, num_features_per_region)
# Define expected results.
exp_rvlad = [
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.175011, 0.175011, 0.350021, 0.903453
]
exp_extra_output = -1
# Compare actual and expected results.
self.assertAllClose(rvlad, exp_rvlad)
self.assertAllEqual(extra_output, exp_extra_output)
def testComputeRvladEmptyRegionsWorks(self):
# Construct inputs.
# Empty feature array.
features = np.array([[]])
num_features_per_region = np.array([])
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD
config.codebook_path = self._codebook_path
config.use_regional_aggregation = True
# Run tested function.
with tf.Graph().as_default() as g, self.session(graph=g) as sess:
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
sess, config)
rvlad, extra_output = extractor.Extract(features, num_features_per_region)
# Define expected results.
exp_rvlad = np.zeros([10], dtype=float)
exp_extra_output = -1
# Compare actual and expected results.
self.assertAllEqual(rvlad, exp_rvlad)
self.assertAllEqual(extra_output, exp_extra_output)
def testComputeUnnormalizedRvladSomeEmptyRegionsWorks(self):
# Construct inputs.
# 4 2-D features: 0 in first region, 3 in second region, 0 in third region,
# 1 in fourth region.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]],
dtype=float)
num_features_per_region = np.array([0, 3, 0, 1])
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD
config.use_l2_normalization = False
config.codebook_path = self._codebook_path
config.num_assignments = 1
config.use_regional_aggregation = True
# Run tested function.
with tf.Graph().as_default() as g, self.session(graph=g) as sess:
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
sess, config)
rvlad, extra_output = extractor.Extract(features, num_features_per_region)
# Define expected results.
exp_rvlad = [
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.079057, 0.079057, 0.158114, 0.408114
]
exp_extra_output = -1
# Compare actual and expected results.
self.assertAllClose(rvlad, exp_rvlad)
self.assertAllEqual(extra_output, exp_extra_output)
def testComputeNormalizedRvladSomeEmptyRegionsWorks(self):
# Construct inputs.
# 4 2-D features: 0 in first region, 3 in second region, 0 in third region,
# 1 in fourth region.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]],
dtype=float)
num_features_per_region = np.array([0, 3, 0, 1])
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD
config.use_l2_normalization = True
config.codebook_path = self._codebook_path
config.num_assignments = 1
config.use_regional_aggregation = True
# Run tested function.
with tf.Graph().as_default() as g, self.session(graph=g) as sess:
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
sess, config)
rvlad, extra_output = extractor.Extract(features, num_features_per_region)
# Define expected results.
exp_rvlad = [
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.175011, 0.175011, 0.350021, 0.903453
]
exp_extra_output = -1
# Compare actual and expected results.
self.assertAllClose(rvlad, exp_rvlad)
self.assertAllEqual(extra_output, exp_extra_output)
def testComputeRvladMisconfiguredFeatures(self):
# Construct inputs.
# 4 2-D features: 3 in first region, 1 in second region.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]],
dtype=float)
# Misconfigured number of features; there are only 4 features, but
# sum(num_features_per_region) = 5.
num_features_per_region = np.array([3, 2])
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD
config.codebook_path = self._codebook_path
config.use_regional_aggregation = True
# Run tested function.
with tf.Graph().as_default() as g, self.session(graph=g) as sess:
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
sess, config)
with self.assertRaisesRegex(
ValueError,
r'Incorrect arguments: sum\(num_features_per_region\) and '
r'features.shape\[0\] are different'):
extractor.Extract(features, num_features_per_region)
def testComputeAsmkWorks(self):
# Construct inputs.
# 3 2-D features.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float)
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.ASMK
config.codebook_path = self._codebook_path
config.num_assignments = 1
# Run tested function.
with tf.Graph().as_default() as g, self.session(graph=g) as sess:
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
sess, config)
asmk, visual_words = extractor.Extract(features)
# Define expected results.
exp_asmk = [-0.707107, 0.707107, 0.707107, 0.707107]
exp_visual_words = [3, 4]
# Compare actual and expected results.
self.assertAllClose(asmk, exp_asmk)
self.assertAllEqual(visual_words, exp_visual_words)
def testComputeAsmkStarWorks(self):
# Construct inputs.
# 3 2-D features.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float)
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.ASMK_STAR
config.codebook_path = self._codebook_path
config.num_assignments = 1
# Run tested function.
with tf.Graph().as_default() as g, self.session(graph=g) as sess:
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
sess, config)
asmk_star, visual_words = extractor.Extract(features)
# Define expected results.
exp_asmk_star = [64, 192]
exp_visual_words = [3, 4]
# Compare actual and expected results.
self.assertAllEqual(asmk_star, exp_asmk_star)
self.assertAllEqual(visual_words, exp_visual_words)
def testComputeAsmkMultipleAssignmentWorks(self):
# Construct inputs.
# 3 2-D features.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float)
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.ASMK
config.codebook_path = self._codebook_path
config.num_assignments = 3
# Run tested function.
with tf.Graph().as_default() as g, self.session(graph=g) as sess:
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
sess, config)
asmk, visual_words = extractor.Extract(features)
# Define expected results.
exp_asmk = [0.707107, 0.707107, 0.0, 1.0, -0.707107, 0.707107]
exp_visual_words = [0, 2, 3]
# Compare actual and expected results.
self.assertAllClose(asmk, exp_asmk)
self.assertAllEqual(visual_words, exp_visual_words)
def testComputeRasmkWorks(self):
# Construct inputs.
# 4 2-D features: 3 in first region, 1 in second region.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]],
dtype=float)
num_features_per_region = np.array([3, 1])
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.ASMK
config.codebook_path = self._codebook_path
config.num_assignments = 1
config.use_regional_aggregation = True
# Run tested function.
with tf.Graph().as_default() as g, self.session(graph=g) as sess:
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
sess, config)
rasmk, visual_words = extractor.Extract(features, num_features_per_region)
# Define expected results.
exp_rasmk = [-0.707107, 0.707107, 0.361261, 0.932465]
exp_visual_words = [3, 4]
# Compare actual and expected results.
self.assertAllClose(rasmk, exp_rasmk)
self.assertAllEqual(visual_words, exp_visual_words)
def testComputeRasmkStarWorks(self):
# Construct inputs.
# 4 2-D features: 3 in first region, 1 in second region.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]],
dtype=float)
num_features_per_region = np.array([3, 1])
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.ASMK_STAR
config.codebook_path = self._codebook_path
config.num_assignments = 1
config.use_regional_aggregation = True
# Run tested function.
with tf.Graph().as_default() as g, self.session(graph=g) as sess:
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
sess, config)
rasmk_star, visual_words = extractor.Extract(features,
num_features_per_region)
# Define expected results.
exp_rasmk_star = [64, 192]
exp_visual_words = [3, 4]
# Compare actual and expected results.
self.assertAllEqual(rasmk_star, exp_rasmk_star)
self.assertAllEqual(visual_words, exp_visual_words)
def testComputeUnknownAggregation(self):
# Construct inputs.
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = 0
config.codebook_path = self._codebook_path
config.use_regional_aggregation = True
# Run tested function.
with tf.Graph().as_default() as g, self.session(graph=g) as sess:
with self.assertRaisesRegex(ValueError, 'Invalid aggregation type'):
feature_aggregation_extractor.ExtractAggregatedRepresentation(
sess, config)
if __name__ == '__main__':
tf.test.main()
|
krbaker/Diamond
|
refs/heads/master
|
src/collectors/ups/ups.py
|
68
|
# coding=utf-8
"""
This class collects data from NUT, a UPS interface for Linux.
#### Dependencies
* nut/upsc to be installed, configured and running.
"""
import diamond.collector
import os
import subprocess
from diamond.collector import str_to_bool
class UPSCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(UPSCollector, self).get_default_config_help()
config_help.update({
'ups_name': 'The name of the ups to collect data for',
'bin': 'The path to the upsc binary',
'use_sudo': 'Use sudo?',
'sudo_cmd': 'Path to sudo',
})
return config_help
def get_default_config(self):
"""
Returns default collector settings.
"""
config = super(UPSCollector, self).get_default_config()
config.update({
'path': 'ups',
'ups_name': 'cyberpower',
'bin': '/bin/upsc',
'use_sudo': False,
'sudo_cmd': '/usr/bin/sudo',
})
return config
def collect(self):
if not os.access(self.config['bin'], os.X_OK):
self.log.error("%s is not executable", self.config['bin'])
return False
command = [self.config['bin'], self.config['ups_name']]
if str_to_bool(self.config['use_sudo']):
command.insert(0, self.config['sudo_cmd'])
p = subprocess.Popen(command,
stdout=subprocess.PIPE).communicate()[0]
for ln in p.strip().splitlines():
datapoint = ln.split(": ")
            try:
                val = float(datapoint[1])
            except (IndexError, ValueError):
                # skip lines whose value is not numeric (e.g. status strings)
                continue
if len(datapoint[0].split(".")) == 2:
# If the metric name is the same as the subfolder
# double it so it's visible.
name = ".".join([datapoint[0], datapoint[0].split(".")[1]])
else:
name = datapoint[0]
self.publish(name, val)
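# Sample `upsc <ups_name>` output this collector parses (values illustrative):
#
#   battery.charge: 100
#   input.voltage: 121.0
#   ups.status: OL
#
# Non-numeric values such as "ups.status: OL" are skipped, and a two-part
# name like "battery.charge" is published as "battery.charge.charge" so the
# leaf metric stays visible under its subfolder.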
|
indictranstech/Das_Erpnext
|
refs/heads/develop
|
erpnext/hr/doctype/employee/test_employee.py
|
59
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import unittest
import frappe.utils
test_records = frappe.get_test_records('Employee')
class TestEmployee(unittest.TestCase):
def test_birthday_reminders(self):
employee = frappe.get_doc("Employee", frappe.db.sql_list("select name from tabEmployee limit 1")[0])
employee.date_of_birth = "1990" + frappe.utils.nowdate()[4:]
employee.company_email = "test@example.com"
employee.save()
from erpnext.hr.doctype.employee.employee import get_employees_who_are_born_today, send_birthday_reminders
self.assertTrue(employee.name in [e.name for e in get_employees_who_are_born_today()])
frappe.db.sql("delete from `tabBulk Email`")
hr_settings = frappe.get_doc("HR Settings", "HR Settings")
hr_settings.stop_birthday_reminders = 0
hr_settings.save()
send_birthday_reminders()
bulk_mails = frappe.db.sql("""select * from `tabBulk Email`""", as_dict=True)
self.assertTrue("Subject: Birthday Reminder for {0}".format(employee.employee_name) \
in bulk_mails[0].message)
|
marissazhou/django
|
refs/heads/master
|
tests/utils_tests/test_datastructures.py
|
262
|
"""
Tests for stuff in django.utils.datastructures.
"""
import copy
from django.test import SimpleTestCase
from django.utils import six
from django.utils.datastructures import (
DictWrapper, ImmutableList, MultiValueDict, MultiValueDictKeyError,
OrderedSet,
)
class OrderedSetTests(SimpleTestCase):
def test_bool(self):
# Refs #23664
s = OrderedSet()
self.assertFalse(s)
s.add(1)
self.assertTrue(s)
def test_len(self):
s = OrderedSet()
self.assertEqual(len(s), 0)
s.add(1)
s.add(2)
s.add(2)
self.assertEqual(len(s), 2)
class MultiValueDictTests(SimpleTestCase):
def test_multivaluedict(self):
d = MultiValueDict({'name': ['Adrian', 'Simon'],
'position': ['Developer']})
self.assertEqual(d['name'], 'Simon')
self.assertEqual(d.get('name'), 'Simon')
self.assertEqual(d.getlist('name'), ['Adrian', 'Simon'])
self.assertEqual(
sorted(six.iteritems(d)),
[('name', 'Simon'), ('position', 'Developer')]
)
self.assertEqual(
sorted(six.iterlists(d)),
[('name', ['Adrian', 'Simon']), ('position', ['Developer'])]
)
six.assertRaisesRegex(self, MultiValueDictKeyError, 'lastname',
d.__getitem__, 'lastname')
self.assertEqual(d.get('lastname'), None)
self.assertEqual(d.get('lastname', 'nonexistent'), 'nonexistent')
self.assertEqual(d.getlist('lastname'), [])
self.assertEqual(d.getlist('doesnotexist', ['Adrian', 'Simon']),
['Adrian', 'Simon'])
d.setlist('lastname', ['Holovaty', 'Willison'])
self.assertEqual(d.getlist('lastname'), ['Holovaty', 'Willison'])
self.assertEqual(sorted(six.itervalues(d)),
['Developer', 'Simon', 'Willison'])
def test_appendlist(self):
d = MultiValueDict()
d.appendlist('name', 'Adrian')
d.appendlist('name', 'Simon')
self.assertEqual(d.getlist('name'), ['Adrian', 'Simon'])
def test_copy(self):
for copy_func in [copy.copy, lambda d: d.copy()]:
d1 = MultiValueDict({
"developers": ["Carl", "Fred"]
})
self.assertEqual(d1["developers"], "Fred")
d2 = copy_func(d1)
d2.update({"developers": "Groucho"})
self.assertEqual(d2["developers"], "Groucho")
self.assertEqual(d1["developers"], "Fred")
d1 = MultiValueDict({
"key": [[]]
})
self.assertEqual(d1["key"], [])
d2 = copy_func(d1)
d2["key"].append("Penguin")
self.assertEqual(d1["key"], ["Penguin"])
self.assertEqual(d2["key"], ["Penguin"])
def test_dict_translation(self):
mvd = MultiValueDict({
'devs': ['Bob', 'Joe'],
'pm': ['Rory'],
})
d = mvd.dict()
self.assertEqual(sorted(six.iterkeys(d)), sorted(six.iterkeys(mvd)))
for key in six.iterkeys(mvd):
self.assertEqual(d[key], mvd[key])
self.assertEqual({}, MultiValueDict().dict())
class ImmutableListTests(SimpleTestCase):
def test_sort(self):
d = ImmutableList(range(10))
# AttributeError: ImmutableList object is immutable.
self.assertRaisesMessage(AttributeError,
'ImmutableList object is immutable.', d.sort)
self.assertEqual(repr(d), '(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)')
def test_custom_warning(self):
d = ImmutableList(range(10), warning="Object is immutable!")
self.assertEqual(d[1], 1)
# AttributeError: Object is immutable!
self.assertRaisesMessage(AttributeError,
'Object is immutable!', d.__setitem__, 1, 'test')
class DictWrapperTests(SimpleTestCase):
def test_dictwrapper(self):
f = lambda x: "*%s" % x
d = DictWrapper({'a': 'a'}, f, 'xx_')
self.assertEqual(
"Normal: %(a)s. Modified: %(xx_a)s" % d,
'Normal: a. Modified: *a'
)
|
ojengwa/oh-mainline
|
refs/heads/master
|
vendor/packages/python-social-auth/social/strategies/cherrypy_strategy.py
|
77
|
import six
import cherrypy
from social.strategies.base import BaseStrategy, BaseTemplateStrategy
class CherryPyJinja2TemplateStrategy(BaseTemplateStrategy):
def __init__(self, strategy):
self.strategy = strategy
self.env = cherrypy.tools.jinja2env
def render_template(self, tpl, context):
return self.env.get_template(tpl).render(context)
def render_string(self, html, context):
return self.env.from_string(html).render(context)
class CherryPyStrategy(BaseStrategy):
DEFAULT_TEMPLATE_STRATEGY = CherryPyJinja2TemplateStrategy
def get_setting(self, name):
return cherrypy.config[name]
def request_data(self, merge=True):
if merge:
data = cherrypy.request.params
elif cherrypy.request.method == 'POST':
            data = cherrypy.request.body.params
else:
data = cherrypy.request.params
return data
def request_host(self):
return cherrypy.request.base
def redirect(self, url):
raise cherrypy.HTTPRedirect(url)
def html(self, content):
return content
def authenticate(self, backend, *args, **kwargs):
kwargs['strategy'] = self
kwargs['storage'] = self.storage
kwargs['backend'] = backend
return backend.authenticate(*args, **kwargs)
def session_get(self, name, default=None):
return cherrypy.session.get(name, default)
def session_set(self, name, value):
cherrypy.session[name] = value
def session_pop(self, name):
cherrypy.session.pop(name, None)
def session_setdefault(self, name, value):
return cherrypy.session.setdefault(name, value)
def build_absolute_uri(self, path=None):
return cherrypy.url(path or '')
def is_response(self, value):
return isinstance(value, six.string_types) or \
isinstance(value, cherrypy.CherryPyException)
|
Jorge-Rodriguez/ansible
|
refs/heads/devel
|
lib/ansible/modules/database/mysql/mysql_user.py
|
3
|
#!/usr/bin/python
# (c) 2012, Mark Theunissen <mark.theunissen@gmail.com>
# Sponsored by Four Kitchens http://fourkitchens.com.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: mysql_user
short_description: Adds or removes a user from a MySQL database.
description:
- Adds or removes a user from a MySQL database.
version_added: "0.6"
options:
name:
description:
- name of the user (role) to add or remove
required: true
password:
description:
- set the user's password.
encrypted:
description:
- Indicate that the 'password' field is a `mysql_native_password` hash
type: bool
default: 'no'
version_added: "2.0"
host:
description:
- the 'host' part of the MySQL username
default: localhost
host_all:
description:
- override the host option, making ansible apply changes to
all hostnames for a given user. This option cannot be used
when creating users
type: bool
default: 'no'
version_added: "2.1"
priv:
description:
- "MySQL privileges string in the format: C(db.table:priv1,priv2)."
- "Multiple privileges can be specified by separating each one using
a forward slash: C(db.table:priv/db.table:priv)."
- The format is based on MySQL C(GRANT) statement.
- Database and table names can be quoted, MySQL-style.
- If column privileges are used, the C(priv1,priv2) part must be
exactly as returned by a C(SHOW GRANT) statement. If not followed,
the module will always report changes. It includes grouping columns
by permission (C(SELECT(col1,col2)) instead of C(SELECT(col1),SELECT(col2))).
append_privs:
description:
- Append the privileges defined by priv to the existing ones for this
user instead of overwriting existing ones.
type: bool
default: 'no'
version_added: "1.4"
sql_log_bin:
description:
- Whether binary logging should be enabled or disabled for the connection.
type: bool
default: 'yes'
version_added: "2.1"
state:
description:
- Whether the user should exist. When C(absent), removes
the user.
default: present
choices: [ "present", "absent" ]
check_implicit_admin:
description:
- Check if mysql allows login as root/nopassword before trying supplied credentials.
type: bool
default: 'no'
version_added: "1.3"
update_password:
default: always
choices: ['always', 'on_create']
version_added: "2.0"
description:
- C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users.
notes:
- "MySQL server installs with default login_user of 'root' and no password. To secure this user
as part of an idempotent playbook, you must create at least two tasks: the first must change the root user's password,
without providing any login_user/login_password details. The second must drop a ~/.my.cnf file containing
the new root credentials. Subsequent runs of the playbook will then succeed by reading the new credentials from
the file."
- Currently, there is only support for the `mysql_native_password` encrypted password hash module.
author: "Jonathan Mainguy (@Jmainguy)"
extends_documentation_fragment: mysql
'''
EXAMPLES = """
# Removes anonymous user account for localhost
- mysql_user:
name: ''
host: localhost
state: absent
# Removes all anonymous user accounts
- mysql_user:
name: ''
host_all: yes
state: absent
# Create database user with name 'bob' and password '12345' with all database privileges
- mysql_user:
name: bob
password: 12345
priv: '*.*:ALL'
state: present
# Create database user with name 'bob' and previously hashed mysql native password '*EE0D72C1085C46C5278932678FBE2C6A782821B4' with all database privileges
- mysql_user:
name: bob
password: '*EE0D72C1085C46C5278932678FBE2C6A782821B4'
encrypted: yes
priv: '*.*:ALL'
state: present
# Creates database user 'bob' and password '12345' with all database privileges and 'WITH GRANT OPTION'
- mysql_user:
name: bob
password: 12345
priv: '*.*:ALL,GRANT'
state: present
# Modify user Bob to require SSL connections. Note that REQUIRESSL is a special privilege that should only apply to *.* by itself.
- mysql_user:
name: bob
append_privs: true
priv: '*.*:REQUIRESSL'
state: present
# Ensure no user named 'sally'@'localhost' exists, also passing in the auth credentials.
- mysql_user:
login_user: root
login_password: 123456
name: sally
state: absent
# Ensure no user named 'sally' exists at all
- mysql_user:
name: sally
host_all: yes
state: absent
# Specify grants composed of more than one word
- mysql_user:
name: replication
password: 12345
priv: "*.*:REPLICATION CLIENT"
state: present
# Revoke all privileges for user 'bob' and password '12345'
- mysql_user:
name: bob
password: 12345
priv: "*.*:USAGE"
state: present
# Example privileges string format
# mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanotherdb.*:ALL
# Example using login_unix_socket to connect to server
- mysql_user:
name: root
password: abc123
login_unix_socket: /var/run/mysqld/mysqld.sock
# Example of skipping binary logging while adding user 'bob'
- mysql_user:
name: bob
password: 12345
priv: "*.*:USAGE"
state: present
sql_log_bin: no
# Example .my.cnf file for setting the root password
# [client]
# user=root
# password=n<_665{vS43y
"""
import re
import string
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.database import SQLParseError
from ansible.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native
VALID_PRIVS = frozenset(('CREATE', 'DROP', 'GRANT', 'GRANT OPTION',
'LOCK TABLES', 'REFERENCES', 'EVENT', 'ALTER',
'DELETE', 'INDEX', 'INSERT', 'SELECT', 'UPDATE',
'CREATE TEMPORARY TABLES', 'TRIGGER', 'CREATE VIEW',
'SHOW VIEW', 'ALTER ROUTINE', 'CREATE ROUTINE',
'EXECUTE', 'FILE', 'CREATE TABLESPACE', 'CREATE USER',
'PROCESS', 'PROXY', 'RELOAD', 'REPLICATION CLIENT',
'REPLICATION SLAVE', 'SHOW DATABASES', 'SHUTDOWN',
'SUPER', 'ALL', 'ALL PRIVILEGES', 'USAGE', 'REQUIRESSL'))
class InvalidPrivsError(Exception):
pass
# ===========================================
# MySQL module specific support methods.
#
# User Authentication Management was changed in MySQL 5.7
# This is a generic check for if the server version is less than version 5.7
def server_version_check(cursor):
cursor.execute("SELECT VERSION()")
result = cursor.fetchone()
version_str = result[0]
version = version_str.split('.')
# Currently we have no facility to handle new-style password update on
# mariadb and the old-style update continues to work
if 'mariadb' in version_str.lower():
return True
if int(version[0]) <= 5 and int(version[1]) < 7:
return True
else:
return False
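# Sketch of the check above against hypothetical version strings:
#   "5.6.40"          -> True  (old-style SET PASSWORD handling)
#   "5.7.22"          -> False (new-style ALTER USER handling)
#   "10.1.34-MariaDB" -> True  (MariaDB keeps the old-style update)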
def get_mode(cursor):
cursor.execute('SELECT @@GLOBAL.sql_mode')
result = cursor.fetchone()
mode_str = result[0]
if 'ANSI' in mode_str:
mode = 'ANSI'
else:
mode = 'NOTANSI'
return mode
def user_exists(cursor, user, host, host_all):
if host_all:
cursor.execute("SELECT count(*) FROM user WHERE user = %s", ([user]))
else:
cursor.execute("SELECT count(*) FROM user WHERE user = %s AND host = %s", (user, host))
count = cursor.fetchone()
return count[0] > 0
def user_add(cursor, user, host, host_all, password, encrypted, new_priv, check_mode):
# we cannot create users without a proper hostname
if host_all:
return False
if check_mode:
return True
if password and encrypted:
cursor.execute("CREATE USER %s@%s IDENTIFIED BY PASSWORD %s", (user, host, password))
elif password and not encrypted:
cursor.execute("CREATE USER %s@%s IDENTIFIED BY %s", (user, host, password))
else:
cursor.execute("CREATE USER %s@%s", (user, host))
if new_priv is not None:
for db_table, priv in iteritems(new_priv):
privileges_grant(cursor, user, host, db_table, priv)
return True
def is_hash(password):
ishash = False
if len(password) == 41 and password[0] == '*':
if frozenset(password[1:]).issubset(string.hexdigits):
ishash = True
return ishash
def user_mod(cursor, user, host, host_all, password, encrypted, new_priv, append_privs, module):
changed = False
grant_option = False
if host_all:
hostnames = user_get_hostnames(cursor, [user])
else:
hostnames = [host]
for host in hostnames:
# Handle clear text and hashed passwords.
if bool(password):
# Determine what user management method server uses
old_user_mgmt = server_version_check(cursor)
if old_user_mgmt:
cursor.execute("SELECT password FROM user WHERE user = %s AND host = %s", (user, host))
else:
cursor.execute("SELECT authentication_string FROM user WHERE user = %s AND host = %s", (user, host))
current_pass_hash = cursor.fetchone()
if encrypted:
                encrypted_string = password
if is_hash(password):
if current_pass_hash[0] != encrypted_string:
if module.check_mode:
return True
if old_user_mgmt:
cursor.execute("SET PASSWORD FOR %s@%s = %s", (user, host, password))
else:
cursor.execute("ALTER USER %s@%s IDENTIFIED WITH mysql_native_password AS %s", (user, host, password))
changed = True
else:
module.fail_json(msg="encrypted was specified however it does not appear to be a valid hash expecting: *SHA1(SHA1(your_password))")
else:
if old_user_mgmt:
cursor.execute("SELECT PASSWORD(%s)", (password,))
else:
cursor.execute("SELECT CONCAT('*', UCASE(SHA1(UNHEX(SHA1(%s)))))", (password,))
new_pass_hash = cursor.fetchone()
if current_pass_hash[0] != new_pass_hash[0]:
if module.check_mode:
return True
if old_user_mgmt:
cursor.execute("SET PASSWORD FOR %s@%s = PASSWORD(%s)", (user, host, password))
else:
cursor.execute("ALTER USER %s@%s IDENTIFIED WITH mysql_native_password BY %s", (user, host, password))
changed = True
# Handle privileges
if new_priv is not None:
curr_priv = privileges_get(cursor, user, host)
# If the user has privileges on a db.table that doesn't appear at all in
# the new specification, then revoke all privileges on it.
for db_table, priv in iteritems(curr_priv):
# If the user has the GRANT OPTION on a db.table, revoke it first.
if "GRANT" in priv:
grant_option = True
if db_table not in new_priv:
if user != "root" and "PROXY" not in priv and not append_privs:
if module.check_mode:
return True
privileges_revoke(cursor, user, host, db_table, priv, grant_option)
changed = True
# If the user doesn't currently have any privileges on a db.table, then
# we can perform a straight grant operation.
for db_table, priv in iteritems(new_priv):
if db_table not in curr_priv:
if module.check_mode:
return True
privileges_grant(cursor, user, host, db_table, priv)
changed = True
# If the db.table specification exists in both the user's current privileges
# and in the new privileges, then we need to see if there's a difference.
db_table_intersect = set(new_priv.keys()) & set(curr_priv.keys())
for db_table in db_table_intersect:
priv_diff = set(new_priv[db_table]) ^ set(curr_priv[db_table])
if len(priv_diff) > 0:
if module.check_mode:
return True
if not append_privs:
privileges_revoke(cursor, user, host, db_table, curr_priv[db_table], grant_option)
privileges_grant(cursor, user, host, db_table, new_priv[db_table])
changed = True
return changed
def user_delete(cursor, user, host, host_all, check_mode):
if check_mode:
return True
if host_all:
hostnames = user_get_hostnames(cursor, [user])
for hostname in hostnames:
cursor.execute("DROP USER %s@%s", (user, hostname))
else:
cursor.execute("DROP USER %s@%s", (user, host))
return True
def user_get_hostnames(cursor, user):
cursor.execute("SELECT Host FROM mysql.user WHERE user = %s", user)
hostnames_raw = cursor.fetchall()
hostnames = []
for hostname_raw in hostnames_raw:
hostnames.append(hostname_raw[0])
return hostnames
def privileges_get(cursor, user, host):
""" MySQL doesn't have a better method of getting privileges aside from the
SHOW GRANTS query syntax, which requires us to then parse the returned string.
Here's an example of the string that is returned from MySQL:
GRANT USAGE ON *.* TO 'user'@'localhost' IDENTIFIED BY 'pass';
This function makes the query and returns a dictionary containing the results.
The dictionary format is the same as that returned by privileges_unpack() below.
"""
output = {}
cursor.execute("SHOW GRANTS FOR %s@%s", (user, host))
grants = cursor.fetchall()
def pick(x):
if x == 'ALL PRIVILEGES':
return 'ALL'
else:
return x
for grant in grants:
res = re.match("""GRANT (.+) ON (.+) TO (['`"]).*\\3@(['`"]).*\\4( IDENTIFIED BY PASSWORD (['`"]).+\5)? ?(.*)""", grant[0])
if res is None:
raise InvalidPrivsError('unable to parse the MySQL grant string: %s' % grant[0])
privileges = res.group(1).split(", ")
privileges = [pick(x) for x in privileges]
if "WITH GRANT OPTION" in res.group(7):
privileges.append('GRANT')
if "REQUIRE SSL" in res.group(7):
privileges.append('REQUIRESSL')
db = res.group(2)
output[db] = privileges
return output
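# Example of the parsing above with a hypothetical SHOW GRANTS row:
#   GRANT SELECT, INSERT ON `mydb`.* TO 'bob'@'localhost'
# yields {'`mydb`.*': ['SELECT', 'INSERT']} in the returned dictionary.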
def privileges_unpack(priv, mode):
""" Take a privileges string, typically passed as a parameter, and unserialize
it into a dictionary, the same format as privileges_get() above. We have this
custom format to avoid using YAML/JSON strings inside YAML playbooks. Example
of a privileges string:
mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanother.*:ALL
The privilege USAGE stands for no privileges, so we add that in on *.* if it's
not specified in the string, as MySQL will always provide this by default.
"""
if mode == 'ANSI':
quote = '"'
else:
quote = '`'
output = {}
privs = []
for item in priv.strip().split('/'):
pieces = item.strip().rsplit(':', 1)
dbpriv = pieces[0].rsplit(".", 1)
# Check for FUNCTION or PROCEDURE object types
parts = dbpriv[0].split(" ", 1)
object_type = ''
if len(parts) > 1 and (parts[0] == 'FUNCTION' or parts[0] == 'PROCEDURE'):
object_type = parts[0] + ' '
dbpriv[0] = parts[1]
# Do not escape if privilege is for database or table, i.e.
# neither quote *. nor .*
for i, side in enumerate(dbpriv):
if side.strip('`') != '*':
dbpriv[i] = '%s%s%s' % (quote, side.strip('`'), quote)
pieces[0] = object_type + '.'.join(dbpriv)
if '(' in pieces[1]:
output[pieces[0]] = re.split(r',\s*(?=[^)]*(?:\(|$))', pieces[1].upper())
for i in output[pieces[0]]:
privs.append(re.sub(r'\s*\(.*\)', '', i))
else:
output[pieces[0]] = pieces[1].upper().split(',')
privs = output[pieces[0]]
new_privs = frozenset(privs)
if not new_privs.issubset(VALID_PRIVS):
raise InvalidPrivsError('Invalid privileges specified: %s' % new_privs.difference(VALID_PRIVS))
if '*.*' not in output:
output['*.*'] = ['USAGE']
# if we are only specifying something like REQUIRESSL and/or GRANT (=WITH GRANT OPTION) in *.*
# we still need to add USAGE as a privilege to avoid syntax errors
if 'REQUIRESSL' in priv and not set(output['*.*']).difference(set(['GRANT', 'REQUIRESSL'])):
output['*.*'].append('USAGE')
return output
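# Example of the unpacking above with a hypothetical privileges string:
#   privileges_unpack('mydb.*:INSERT,UPDATE/otherdb.*:SELECT', 'NOTANSI')
# returns {'`mydb`.*': ['INSERT', 'UPDATE'], '`otherdb`.*': ['SELECT'],
#          '*.*': ['USAGE']}  # USAGE on *.* is added implicitly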
def privileges_revoke(cursor, user, host, db_table, priv, grant_option):
# Escape '%' since mysql db.execute() uses a format string
db_table = db_table.replace('%', '%%')
if grant_option:
query = ["REVOKE GRANT OPTION ON %s" % db_table]
query.append("FROM %s@%s")
query = ' '.join(query)
cursor.execute(query, (user, host))
priv_string = ",".join([p for p in priv if p not in ('GRANT', 'REQUIRESSL')])
query = ["REVOKE %s ON %s" % (priv_string, db_table)]
query.append("FROM %s@%s")
query = ' '.join(query)
cursor.execute(query, (user, host))
def privileges_grant(cursor, user, host, db_table, priv):
# Escape '%' since mysql db.execute uses a format string and the
# specification of db and table often use a % (SQL wildcard)
db_table = db_table.replace('%', '%%')
priv_string = ",".join([p for p in priv if p not in ('GRANT', 'REQUIRESSL')])
query = ["GRANT %s ON %s" % (priv_string, db_table)]
query.append("TO %s@%s")
if 'REQUIRESSL' in priv:
query.append("REQUIRE SSL")
if 'GRANT' in priv:
query.append("WITH GRANT OPTION")
query = ' '.join(query)
cursor.execute(query, (user, host))
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default=None),
login_password=dict(default=None, no_log=True),
login_host=dict(default="localhost"),
login_port=dict(default=3306, type='int'),
login_unix_socket=dict(default=None),
user=dict(required=True, aliases=['name']),
password=dict(default=None, no_log=True, type='str'),
encrypted=dict(default=False, type='bool'),
host=dict(default="localhost"),
host_all=dict(type="bool", default="no"),
state=dict(default="present", choices=["absent", "present"]),
priv=dict(default=None),
append_privs=dict(default=False, type='bool'),
check_implicit_admin=dict(default=False, type='bool'),
update_password=dict(default="always", choices=["always", "on_create"]),
connect_timeout=dict(default=30, type='int'),
config_file=dict(default="~/.my.cnf", type='path'),
sql_log_bin=dict(default=True, type='bool'),
ssl_cert=dict(default=None, type='path'),
ssl_key=dict(default=None, type='path'),
ssl_ca=dict(default=None, type='path'),
),
supports_check_mode=True
)
login_user = module.params["login_user"]
login_password = module.params["login_password"]
user = module.params["user"]
password = module.params["password"]
encrypted = module.boolean(module.params["encrypted"])
host = module.params["host"].lower()
host_all = module.params["host_all"]
state = module.params["state"]
priv = module.params["priv"]
check_implicit_admin = module.params['check_implicit_admin']
connect_timeout = module.params['connect_timeout']
config_file = module.params['config_file']
append_privs = module.boolean(module.params["append_privs"])
update_password = module.params['update_password']
ssl_cert = module.params["ssl_cert"]
ssl_key = module.params["ssl_key"]
ssl_ca = module.params["ssl_ca"]
db = 'mysql'
sql_log_bin = module.params["sql_log_bin"]
if mysql_driver is None:
module.fail_json(msg=mysql_driver_fail_msg)
cursor = None
try:
if check_implicit_admin:
try:
cursor = mysql_connect(module, 'root', '', config_file, ssl_cert, ssl_key, ssl_ca, db,
connect_timeout=connect_timeout)
except Exception:
pass
if not cursor:
cursor = mysql_connect(module, login_user, login_password, config_file, ssl_cert, ssl_key, ssl_ca, db,
connect_timeout=connect_timeout)
except Exception as e:
module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. "
"Exception message: %s" % (config_file, to_native(e)))
if not sql_log_bin:
cursor.execute("SET SQL_LOG_BIN=0;")
if priv is not None:
try:
mode = get_mode(cursor)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
try:
priv = privileges_unpack(priv, mode)
except Exception as e:
module.fail_json(msg="invalid privileges string: %s" % to_native(e))
if state == "present":
if user_exists(cursor, user, host, host_all):
try:
if update_password == 'always':
changed = user_mod(cursor, user, host, host_all, password, encrypted, priv, append_privs, module)
else:
changed = user_mod(cursor, user, host, host_all, None, encrypted, priv, append_privs, module)
except (SQLParseError, InvalidPrivsError, mysql_driver.Error) as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
else:
if host_all:
module.fail_json(msg="host_all parameter cannot be used when adding a user")
try:
changed = user_add(cursor, user, host, host_all, password, encrypted, priv, module.check_mode)
except (SQLParseError, InvalidPrivsError, mysql_driver.Error) as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
elif state == "absent":
if user_exists(cursor, user, host, host_all):
changed = user_delete(cursor, user, host, host_all, module.check_mode)
else:
changed = False
module.exit_json(changed=changed, user=user)
if __name__ == '__main__':
main()
|
after12am/summary
|
refs/heads/master
|
summary/digest.py
|
1
|
# -*- coding: utf-8 -*-
import os, sys
import json
import nltk
import numpy
from pprint import pprint
N = 100 # number of top-frequency words to consider
CLUSTER_THRESHOLD = 5 # max distance (in words) between significant words in a cluster
TOP_SENTENCES = 5 # number of sentences in the summary
def _score_sentences(sentences, important_words):
scores = []
sentence_idx = -1
for s in [nltk.tokenize.word_tokenize(s) for s in sentences]:
sentence_idx += 1
word_idx = []
for w in important_words:
try:
# compute position of important word in this sentence
word_idx.append(s.index(w))
            except ValueError:
                # w does not occur in this sentence
                pass
word_idx.sort()
        # this sentence contains no important words at all
if len(word_idx) == 0:
continue
        # Group the word indices into clusters, splitting wherever two
        # consecutive significant words are further apart than CLUSTER_THRESHOLD
clusters = []
cluster = [word_idx[0]]
        i = 1
while i < len(word_idx):
if word_idx[i] - word_idx[i - 1] < CLUSTER_THRESHOLD:
cluster.append(word_idx[i])
else:
clusters.append(cluster[:])
cluster = [word_idx[i]]
i += 1
clusters.append(cluster)
        # Calculate a score for each cluster; the maximum cluster score
        # becomes the score for the sentence.
max_cluster_score = 0
for c in clusters:
significant_words_in_cluster = len(c)
total_words_in_cluster = c[-1] - c[0] + 1
score = 1.0 * significant_words_in_cluster * \
significant_words_in_cluster / total_words_in_cluster
if score > max_cluster_score:
max_cluster_score = score
scores.append((sentence_idx, score))
return scores
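# Worked example of the scoring above (hypothetical positions): with
# CLUSTER_THRESHOLD = 5 and significant-word indices [2, 4, 5, 12] in one
# sentence, [2, 4, 5] form one cluster and [12] another; the first scores
# 3 * 3 / 4.0 = 2.25, the second 1.0, so the sentence scores 2.25.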
def summarize(text):
sentences = [s.strip() for s in nltk.tokenize.sent_tokenize(text)]
normalized_sentences = [s.lower() for s in sentences]
words = [w.lower() for sentence in normalized_sentences \
for w in nltk.tokenize.word_tokenize(sentence) if len(w) > 2]
fdist = nltk.FreqDist(words)
top_n_words = [w[0] for w in fdist.items() \
if w[0] not in nltk.corpus.stopwords.words('english')][:N]
scored_sentences = _score_sentences(normalized_sentences, top_n_words)
avg = numpy.mean([s[1] for s in scored_sentences])
std = numpy.std([s[1] for s in scored_sentences])
mean_scored = [(sent_idx, score) for (sent_idx, score) in scored_sentences if score > avg + 0.5 * std]
top_n_scored = sorted(scored_sentences, key=lambda s: s[1])[-TOP_SENTENCES:]
top_n_scored = sorted(top_n_scored, key=lambda s: s[0])
return dict(top_n_summary = [sentences[idx] for (idx, score) in top_n_scored], \
mean_scored_summary = [sentences[idx] for (idx, score) in mean_scored])
def main():
result = summarize(u"""
Joan Rivers, the raspy loudmouth who pounced on America’s obsessions with flab, face-lifts, body hair and other blemishes of neurotic life, including her own, in five decades of caustic comedy that propelled her from nightclubs to television to international stardom, died on Thursday in Manhattan. She was 81.
Her daughter, Melissa Rivers, confirmed her death. A spokeswoman, Judy Katz, said the cause had not yet been determined.
    Ms. Rivers died at Mount Sinai Hospital, where she had been taken last Thursday from an outpatient surgery clinic after going into cardiac arrest and losing consciousness, the authorities said. The State Health Department is investigating the circumstances that led to her death, a state official said Thursday.
    Ms. Rivers had been in the clinic for a minor procedure on her vocal cords, according to a spokesman. Her daughter said Tuesday that her mother was on life support and Wednesday that she was out of intensive care.
    Ms. Rivers was one of America’s first successful female stand-up comics in an aggressive tradition that had been almost exclusively the province of men, from Don Rickles to Lenny Bruce. She was a role model and an inspiration for tough-talking comedians like Roseanne Barr, Sarah Silverman and countless others.
""")
pprint(result)
if __name__ == '__main__':
main()
|
deatharrow/lge-kernel-e400
|
refs/heads/master
|
Documentation/target/tcm_mod_builder.py
|
4981
|
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
	if os.path.isdir(fabric_mod_dir_var):
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
	p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
	p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
	p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
p.write(buf)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w')
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
p.write(buf)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
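# For reference, the entries collected in fabric_ops are the raw
# function-pointer declaration lines scraped from target_core_fabric.h,
# e.g. (illustrative, not exhaustive):
#
#   char *(*get_fabric_name)(void);
#   u32 (*tpg_get_default_depth)(struct se_portal_group *);
#
# tcm_mod_dump_fabric_ops() below matches these strings by name to decide
# which stub definitions and prototypes to emit.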
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
p.write(buf)
p.close()
pi.write(bufi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
p.write(buf)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
p.write(buf)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
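# For a module named tcm_nab5000 (hypothetical), the line appended to
# drivers/target/Makefile would be:
#   obj-$(CONFIG_TCM_NAB5000) += tcm_nab5000/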
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd()
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
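# Example invocation (a sketch; the module name is hypothetical, and the
# "../../" tcm_dir computed in main() assumes the script is run from a
# subdirectory two levels inside a kernel source tree):
#
#   python tcm_mod_builder.py -m tcm_nab5000 -p iSCSI
#
# This generates drivers/target/tcm_nab5000/ containing tcm_nab5000_base.h,
# tcm_nab5000_fabric.c/.h, tcm_nab5000_configfs.c, plus Makefile and Kconfig.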
|
janusnic/dj-21v
|
refs/heads/master
|
unit_05/mysite/blog/admin.py
|
1
|
from django.contrib import admin
from .models import Category, Tag, Article
class CategoryAdmin(admin.ModelAdmin):
list_display = ('name', 'slug')
list_display_links = ('name',)
search_fields = ['name', 'slug', 'description']
prepopulated_fields = {"slug": ("name",)}
admin.site.register(Category, CategoryAdmin)
class TagAdmin(admin.ModelAdmin):
list_display = ('name', 'slug')
list_display_links = ('name',)
search_fields = ['name', 'slug']
prepopulated_fields = {"slug": ("name",)}
admin.site.register(Tag, TagAdmin)
class ArticleAdmin(admin.ModelAdmin):
list_display = ('title', 'publish_date', 'status', 'was_published_recently')
list_filter = ['publish_date']
search_fields = ['title']
ordering = ['publish_date']
filter_horizontal = ('tags',)
prepopulated_fields = {"slug": ("title",)}
date_hierarchy = 'publish_date'
readonly_fields = ('publish_date','created_date')
fieldsets = [
('Item', {'fields': [('title','slug'),'category','content']}),
('Date information', {'fields': [('publish_date','created_date')], 'classes': ['collapse']}),
('Related tags', {'fields': ['tags']}),
('Metas', {'fields': [('status')]}),
]
admin.site.register(Article, ArticleAdmin)
|
fuhrysteve/flask-security
|
refs/heads/develop
|
tests/test_recoverable.py
|
4
|
# -*- coding: utf-8 -*-
"""
test_recoverable
~~~~~~~~~~~~~~~~
Recoverable functionality tests
"""
import time
import pytest
from flask import Flask
from utils import authenticate, logout
from flask_security.core import UserMixin
from flask_security.forms import LoginForm
from flask_security.signals import password_reset, \
reset_password_instructions_sent
from flask_security.utils import capture_reset_password_requests, string_types
pytestmark = pytest.mark.recoverable()
def test_recoverable_flag(app, client, get_message):
recorded_resets = []
recorded_instructions_sent = []
@password_reset.connect_via(app)
def on_password_reset(app, user):
recorded_resets.append(user)
@reset_password_instructions_sent.connect_via(app)
def on_instructions_sent(app, user, token):
assert isinstance(app, Flask)
assert isinstance(user, UserMixin)
assert isinstance(token, string_types)
recorded_instructions_sent.append(user)
# Test the reset view
response = client.get('/reset')
assert b'<h1>Send password reset instructions</h1>' in response.data
# Test submitting email to reset password creates a token and sends email
with capture_reset_password_requests() as requests:
with app.mail.record_messages() as outbox:
response = client.post(
'/reset',
data=dict(
email='joe@lp.com'),
follow_redirects=True)
assert len(recorded_instructions_sent) == 1
assert len(outbox) == 1
assert response.status_code == 200
assert get_message(
'PASSWORD_RESET_REQUEST',
email='joe@lp.com') in response.data
token = requests[0]['token']
# Test view for reset token
response = client.get('/reset/' + token)
assert b'<h1>Reset password</h1>' in response.data
# Test submitting a new password
response = client.post('/reset/' + token, data={
'password': 'newpassword',
'password_confirm': 'newpassword'
}, follow_redirects=True)
assert get_message('PASSWORD_RESET') in response.data
assert len(recorded_resets) == 1
assert b'Hello joe@lp.com' not in response.data
logout(client)
# Test logging in with the new password
response = authenticate(
client,
'joe@lp.com',
'newpassword',
follow_redirects=True)
assert b'Hello joe@lp.com' in response.data
logout(client)
# Test submitting JSON
response = client.post('/reset', data='{"email": "joe@lp.com"}', headers={
'Content-Type': 'application/json'
})
assert response.headers['Content-Type'] == 'application/json'
assert 'user' not in response.jdata['response']
logout(client)
# Test invalid email
response = client.post(
'/reset',
data=dict(
email='bogus@lp.com'),
follow_redirects=True)
assert get_message('USER_DOES_NOT_EXIST') in response.data
logout(client)
# Test invalid token
response = client.post('/reset/bogus', data={
'password': 'newpassword',
'password_confirm': 'newpassword'
}, follow_redirects=True)
assert get_message('INVALID_RESET_PASSWORD_TOKEN') in response.data
# Test mangled token
token = (
"WyIxNjQ2MzYiLCIxMzQ1YzBlZmVhM2VhZjYwODgwMDhhZGU2YzU0MzZjMiJd."
"BZEw_Q.lQyo3npdPZtcJ_sNHVHP103syjM"
"&url_id=fbb89a8328e58c181ea7d064c2987874bc54a23d")
response = client.post('/reset/' + token, data={
'password': 'newpassword',
'password_confirm': 'newpassword'
}, follow_redirects=True)
assert get_message('INVALID_RESET_PASSWORD_TOKEN') in response.data
def test_login_form_description(sqlalchemy_app):
app = sqlalchemy_app()
with app.test_request_context('/login'):
login_form = LoginForm()
expected = '<a href="/reset">Forgot password?</a>'
assert login_form.password.description == expected
@pytest.mark.settings(reset_password_within='1 milliseconds')
def test_expired_reset_token(client, get_message):
with capture_reset_password_requests() as requests:
client.post(
'/reset',
data=dict(
email='joe@lp.com'),
follow_redirects=True)
user = requests[0]['user']
token = requests[0]['token']
time.sleep(1)
response = client.post('/reset/' + token, data={
'password': 'newpassword',
'password_confirm': 'newpassword'
}, follow_redirects=True)
msg = get_message(
'PASSWORD_RESET_EXPIRED',
within='1 milliseconds',
email=user.email)
assert msg in response.data
def test_reset_token_deleted_user(app, client, get_message,
sqlalchemy_datastore):
with capture_reset_password_requests() as requests:
client.post(
'/reset',
data=dict(
email='gene@lp.com'),
follow_redirects=True)
user = requests[0]['user']
token = requests[0]['token']
# Delete user
with app.app_context():
sqlalchemy_datastore.delete(user)
sqlalchemy_datastore.commit()
response = client.post('/reset/' + token, data={
'password': 'newpassword',
'password_confirm': 'newpassword'
}, follow_redirects=True)
msg = get_message('INVALID_RESET_PASSWORD_TOKEN')
assert msg in response.data
def test_used_reset_token(client, get_message):
with capture_reset_password_requests() as requests:
client.post(
'/reset',
data=dict(
email='joe@lp.com'),
follow_redirects=True)
token = requests[0]['token']
# use the token
response = client.post('/reset/' + token, data={
'password': 'newpassword',
'password_confirm': 'newpassword'
}, follow_redirects=True)
assert get_message('PASSWORD_RESET') in response.data
logout(client)
# attempt to use it a second time
response2 = client.post('/reset/' + token, data={
'password': 'otherpassword',
'password_confirm': 'otherpassword'
}, follow_redirects=True)
msg = get_message('INVALID_RESET_PASSWORD_TOKEN')
assert msg in response2.data
def test_reset_token_redirect(client, get_message):
with capture_reset_password_requests() as requests:
client.post(
'/reset',
data=dict(
email='joe@lp.com'),
follow_redirects=True)
token = requests[0]['token']
response = client.post('/reset/' + token, data={
'password': 'newpassword',
'password_confirm': 'newpassword'
})
assert 'location' in response.headers
assert '/login' in response.location
response = client.get(response.location)
assert get_message('PASSWORD_RESET') in response.data
@pytest.mark.settings(post_reset_view='/post_reset')
def test_reset_token_redirect_to_post_reset(client, get_message):
with capture_reset_password_requests() as requests:
client.post(
'/reset',
data=dict(
email='joe@lp.com'),
follow_redirects=True)
token = requests[0]['token']
response = client.post('/reset/' + token, data={
'password': 'newpassword',
'password_confirm': 'newpassword'
}, follow_redirects=True)
assert b'Post Reset' in response.data
def test_reset_passwordless_user(client, get_message):
with capture_reset_password_requests() as requests:
client.post(
'/reset',
data=dict(
email='jess@lp.com'),
follow_redirects=True)
token = requests[0]['token']
# use the token
response = client.post('/reset/' + token, data={
'password': 'newpassword',
'password_confirm': 'newpassword'
}, follow_redirects=True)
assert get_message('PASSWORD_RESET') in response.data
@pytest.mark.settings(reset_url='/custom_reset')
def test_custom_reset_url(client):
response = client.get('/custom_reset')
assert response.status_code == 200
@pytest.mark.settings(
reset_password_template='custom_security/reset_password.html',
forgot_password_template='custom_security/forgot_password.html')
def test_custom_reset_templates(client):
response = client.get('/reset')
assert b'CUSTOM FORGOT PASSWORD' in response.data
with capture_reset_password_requests() as requests:
client.post(
'/reset',
data=dict(
email='joe@lp.com'),
follow_redirects=True)
token = requests[0]['token']
response = client.get('/reset/' + token)
assert b'CUSTOM RESET PASSWORD' in response.data
|
bdoner/SickRage
|
refs/heads/master
|
lib/hachoir_parser/file_system/__init__.py
|
94
|
from hachoir_parser.file_system.ext2 import EXT2_FS
from hachoir_parser.file_system.fat import FAT12, FAT16, FAT32
from hachoir_parser.file_system.mbr import MSDos_HardDrive
from hachoir_parser.file_system.ntfs import NTFS
from hachoir_parser.file_system.iso9660 import ISO9660
from hachoir_parser.file_system.reiser_fs import REISER_FS
from hachoir_parser.file_system.linux_swap import LinuxSwapFile
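# These imports are what make the file-system parsers visible to hachoir's
# parser registry; each class is picked up as a side effect of being
# imported here (an assumption about hachoir's registration mechanism,
# not verified against a specific version).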
|
wvolz/smartplug-monitor
|
refs/heads/master
|
multiple.py
|
1
|
#!/usr/bin/python
# This is an implementation of monitoring the Lowe's Iris Smart
# Switch that I use. It will join with a switch and does NOT allow you
# to control the switch
#
# This version has been adapted to support more than one switch and will
# add a new record to my database to hold the data. Adapt it as you need
# to.
#
# Have fun
from xbee import ZigBee
from apscheduler.scheduler import Scheduler
import logging
import datetime
import time
import serial
import sys
import shlex
import sqlite3
#-------------------------------------------------
# the database where I'm storing stuff
#DATABASE='/home/pi/database/desert-home'
DATABASE='/home/volz/xbee-monitor/desert-home.sqlite3'
# on the Raspberry Pi the serial port is ttyAMA0
#XBEEPORT = '/dev/ttyUSB1'
XBEEPORT = '/dev/serial/by-id/usb-FTDI_FT232R_USB_UART_AD02FMB2-if00-port0'
XBEEBAUD_RATE = 9600
# The XBee addresses I'm dealing with
BROADCAST = '\x00\x00\x00\x00\x00\x00\xff\xff'
UNKNOWN = '\xff\xfe' # This is the 'I don't know' 16 bit address
#-------------------------------------------------
logging.basicConfig()
# this is the only way I could think of to get the address strings to store.
# I take the ord() to get a number, convert to hex, then take the 3 to end
# characters and pad them with zero and finally put the '0x' back on the front
# I put spaces in between each hex character to make it easier to read. This
# left an extra space at the end, so I slice it off in the return statement.
# I hope this makes it easier to grab it out of the database when needed
def addrToString(funnyAddrString):
hexified = ''
for i in funnyAddrString:
hexified += '0x' + hex(ord(i))[2:].zfill(2) + ' '
return hexified[:-1]
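# A minimal sketch of what addrToString() produces (the address bytes are
# made up):
#
#   >>> addrToString('\x00\x13\xa2\x00\x40\x0a\x01\x4b')
#   '0x00 0x13 0xa2 0x00 0x40 0x0a 0x01 0x4b'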
#------------ XBee Stuff -------------------------
# Open serial port for use by the XBee
ser = serial.Serial(XBEEPORT, XBEEBAUD_RATE)
# this is a call back function. When a message
# comes in this function will get the data
def messageReceived(data):
#print 'gotta packet'
#print data
clusterId = (ord(data['cluster'][0])*256) + ord(data['cluster'][1])
switchLongAddr = data['source_addr_long']
sourceAddr = switchLongAddr.encode("hex")
print 'Addr:', sourceAddr, 'Cluster ID:', hex(clusterId),
#print 'Cluster ID:', hex(clusterId),
if (clusterId == 0x13):
# This is the device announce message.
# due to timing problems with the switch itself, I don't
# respond to this message, I save the response for later after the
# Match Descriptor request comes in. You'll see it down below.
# if you want to see the data that came in with this message, just
# uncomment the 'print data' comment up above
print 'Device Announce Message'
elif (clusterId == 0x8005):
# this is the Active Endpoint Response This message tells you
# what the device can do, but it isn't constructed correctly to match
# what the switch can do according to the spec. This is another
# message that gets its response after I receive the Match Descriptor
print 'Active Endpoint Response'
elif (clusterId == 0x0006):
# Match Descriptor Request; this is the point where I finally
# respond to the switch. Several messages are sent to cause the
# switch to join with the controller at a network level and to cause
# it to regard this controller as valid.
#
# First the Active Endpoint Request
payload1 = '\x00\x00'
zb.send('tx_explicit',
dest_addr_long = data['source_addr_long'],
dest_addr = data['source_addr'],
src_endpoint = '\x00',
dest_endpoint = '\x00',
cluster = '\x00\x05',
profile = '\x00\x00',
data = payload1
)
print 'sent Active Endpoint'
# Now the Match Descriptor Response
payload2 = '\x00\x00\x00\x00\x01\x02'
zb.send('tx_explicit',
dest_addr_long = data['source_addr_long'],
dest_addr = data['source_addr'],
src_endpoint = '\x00',
dest_endpoint = '\x00',
cluster = '\x80\x06',
profile = '\x00\x00',
data = payload2
)
print 'Sent Match Descriptor'
# Now there are two messages directed at the hardware
# code (rather than the network code). The switch has to
# receive both of these to stay joined.
payload3 = '\x11\x01\x01'
zb.send('tx_explicit',
dest_addr_long = data['source_addr_long'],
dest_addr = data['source_addr'],
src_endpoint = '\x00',
dest_endpoint = '\x02',
cluster = '\x00\xf6',
profile = '\xc2\x16',
data = payload3
)
payload4 = '\x19\x01\xfa\x00\x01'
zb.send('tx_explicit',
dest_addr_long = data['source_addr_long'],
dest_addr = data['source_addr'],
src_endpoint = '\x00',
dest_endpoint = '\x02',
cluster = '\x00\xf0',
profile = '\xc2\x16',
data = payload4
)
print 'Sent hardware join messages'
# now that it should have joined, I'll add a record to the database to
# hold the status. I'll just name the device 'unknown' so it can
# be updated by hand using sqlite3 directly. If the device already exists,
# I'll leave the name alone and just use the existing record
# Yes, this means you'll have to go into the database and assign it a name
#
dbconn = sqlite3.connect(DATABASE)
c = dbconn.cursor()
# See if the device is already in the database
c.execute("select name from smartswitch "
"where longaddress = ?; ",
(addrToString(data['source_addr_long']),))
switchrecord = c.fetchone()
if switchrecord is not None:
print "Device %s is rejoining the network" %(switchrecord[0])
else:
print "Adding new device"
c.execute("insert into smartswitch(name,longaddress, shortaddress, status, watts, twatts, utime)"
"values (?, ?, ?, ?, ?, ?, ?);",
('unknown',
addrToString(data['source_addr_long']),
addrToString(data['source_addr']),
'unknown',
'0',
'0',
time.strftime("%A, %B, %d at %H:%M:%S")))
dbconn.commit()
dbconn.close()
elif (clusterId == 0xef):
clusterCmd = ord(data['rf_data'][2])
if (clusterCmd == 0x81):
usage = ord(data['rf_data'][3]) + (ord(data['rf_data'][4]) * 256)
dbconn = sqlite3.connect(DATABASE)
c = dbconn.cursor()
# This is commented out because I don't need the name
# unless I'm debugging.
# get device name from database
#c.execute("select name from smartswitch "
# "where longaddress = ?; ",
# (addrToString(data['source_addr_long']),))
#name = c.fetchone()[0].capitalize()
#print "%s Instaneous Power, %d Watts" %(name, usage)
# do database updates
c.execute("update smartswitch "
"set watts = ?, "
"shortaddress = ?, "
"utime = ? where longaddress = ?; ",
(usage, addrToString(data['source_addr']),
time.strftime("%A, %B, %d at %H:%M:%S"), addrToString(data['source_addr_long'])))
dbconn.commit()
dbconn.close()
elif (clusterCmd == 0x82):
usage = (ord(data['rf_data'][3]) +
(ord(data['rf_data'][4]) * 256) +
(ord(data['rf_data'][5]) * 256 * 256) +
(ord(data['rf_data'][6]) * 256 * 256 * 256) )
upTime = (ord(data['rf_data'][7]) +
(ord(data['rf_data'][8]) * 256) +
(ord(data['rf_data'][9]) * 256 * 256) +
(ord(data['rf_data'][10]) * 256 * 256 * 256) )
dbconn = sqlite3.connect(DATABASE)
c = dbconn.cursor()
c.execute("select name from smartswitch "
"where longaddress = ?; ",
(addrToString(data['source_addr_long']),))
name = c.fetchone()[0].capitalize()
print "%s Minute Stats: Usage, %d Watt Hours; Uptime, %d Seconds" %(name, usage/3600, upTime)
# update database stuff
c.execute("update smartswitch "
"set twatts = ?, "
"shortaddress = ?, "
"utime = ? where longaddress = ?; ",
(usage, addrToString(data['source_addr']),
time.strftime("%A, %B, %d at %H:%M:%S"), addrToString(data['source_addr_long'])))
dbconn.commit()
dbconn.close()
elif (clusterId == 0xf0):
clusterCmd = ord(data['rf_data'][2])
# print "Cluster Cmd:", hex(clusterCmd),
# if (clusterCmd == 0xfb):
#print "Temperature ??"
# else:
#print "Unimplemented"
elif (clusterId == 0xf6):
clusterCmd = ord(data['rf_data'][2])
# if (clusterCmd == 0xfd):
# pass #print "RSSI value:", ord(data['rf_data'][3])
# elif (clusterCmd == 0xfe):
# pass #print "Version Information"
# else:
# pass #print data['rf_data']
elif (clusterId == 0xee):
clusterCmd = ord(data['rf_data'][2])
status = ''
if (clusterCmd == 0x80):
if (ord(data['rf_data'][3]) & 0x01):
status = "ON"
else:
status = "OFF"
dbconn = sqlite3.connect(DATABASE)
c = dbconn.cursor()
c.execute("select name from smartswitch "
"where longaddress = ?; ",
(addrToString(data['source_addr_long']),))
print c.fetchone()[0].capitalize(),
print "Switch is", status
c.execute("update smartswitch "
"set status = ?, "
"shortaddress = ?, "
"utime = ? where longaddress = ?; ",
(status, addrToString(data['source_addr']),
time.strftime("%A, %B, %d at %H:%M:%S"), addrToString(data['source_addr_long'])))
dbconn.commit()
dbconn.close()
else:
print "Unimplemented Cluster ID", hex(clusterId)
print
def sendSwitch(whereLong, whereShort, srcEndpoint, destEndpoint,
clusterId, profileId, clusterCmd, databytes):
payload = '\x11\x00' + clusterCmd + databytes
# print 'payload',
# for c in payload:
# print hex(ord(c)),
# print
# print 'long address:',
# for c in whereLong:
# print hex(ord(c)),
# print
zb.send('tx_explicit',
dest_addr_long = whereLong,
dest_addr = whereShort,
src_endpoint = srcEndpoint,
dest_endpoint = destEndpoint,
cluster = clusterId,
profile = profileId,
data = payload
)
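# sendSwitch() is a helper kept for experimenting with outgoing commands;
# it is not called anywhere in the monitoring loop below. A hypothetical
# call (the cluster command byte and empty payload are placeholders, not
# verified against the switch's cluster spec) might look like:
#
#   sendSwitch(switchLongAddr, UNKNOWN, '\x00', '\x02',
#              '\x00\xee', '\xc2\x16', '\x01', '')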
# This just puts a time stamp in the log file for tracking
def timeInLog():
print time.strftime("%A, %B, %d at %H:%M:%S")
#------------------If you want to schedule something to happen -----
scheditem = Scheduler()
scheditem.start()
scheditem.add_interval_job(timeInLog, minutes=15)
#-----------------------------------------------------------------
# Create XBee library API object, which spawns a new thread
#zb = ZigBee(ser, callback=messageReceived)
zb = ZigBee(ser, escaped=True, callback=messageReceived)
print "started at ", time.strftime("%A, %B, %d at %H:%M:%S")
while True:
try:
time.sleep(0.1)
sys.stdout.flush() # if you're running non interactive, do this
except KeyboardInterrupt:
print "Keyboard interrupt"
break
except:
print "Unexpected error:", sys.exc_info()[0]
break
print "After the while loop"
# halt() must be called before closing the serial
# port in order to ensure proper thread shutdown
zb.halt()
ser.close()
|
uxlsl/blog
|
refs/heads/master
|
movie/migrations/0001_initial.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-06 13:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Movie',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, unique=True, verbose_name='\u540d\u79f0')),
('url', models.URLField(verbose_name='\u6240\u5728\u7f51\u5740')),
('source', models.CharField(max_length=128, verbose_name='\u4e0b\u8f7d\u6765\u6e90')),
('down_urls', models.TextField(verbose_name='\u4e0b\u8f7d\u5730\u5740,\u591a\u4e2a\u6570\u636ejson)')),
('create_at', models.DateTimeField(auto_now=True, verbose_name='\u65f6\u95f4')),
('update_at', models.DateTimeField(auto_now=True, verbose_name='\u66f4\u65b0\u65f6\u95f4')),
],
),
]
|
SmartInfrastructures/fuel-web-dev
|
refs/heads/master
|
network_checker/url_access_checker/network.py
|
3
|
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from contextlib import contextmanager
from logging import getLogger
import netifaces
from url_access_checker.errors import CommandFailed
from url_access_checker.utils import execute
logger = getLogger(__name__)
def get_default_gateway():
"""Return ipaddress, interface pair for default gateway
"""
gws = netifaces.gateways()
if 'default' in gws:
return gws['default'][netifaces.AF_INET]
return None, None
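# For reference, netifaces.gateways() typically returns a mapping like
# {'default': {netifaces.AF_INET: ('192.168.1.1', 'eth0')}, ...}, so the
# value returned here is an (address, interface) pair such as
# ('192.168.1.1', 'eth0').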
def check_ifaddress_present(iface, addr):
"""Check if required ipaddress already assigned to the iface
"""
for ifaddress in netifaces.ifaddresses(iface).get(netifaces.AF_INET, []):
if ifaddress['addr'] in addr:
return True
return False
def check_exist(iface):
rc, _, err = execute(['ip', 'link', 'show', iface])
if rc == 1 and 'does not exist' in err:
return False
elif rc:
msg = 'ip link show {0} failed with {1}'.format(iface, err)
raise CommandFailed(msg)
return True
def check_up(iface):
rc, stdout, _ = execute(['ip', 'link', 'show', iface])
return 'UP' in stdout
def log_network_info(stage):
logger.info('Logging networking info at %s', stage)
stdout = execute(['ip', 'a'])[1]
logger.info('ip a: %s', stdout)
stdout = execute(['ip', 'ro'])[1]
logger.info('ip ro: %s', stdout)
class Eth(object):
def __init__(self, iface):
self.iface = iface
self.is_up = None
def setup(self):
self.is_up = check_up(self.iface)
if self.is_up is False:
rc, out, err = execute(['ip', 'link', 'set',
'dev', self.iface, 'up'])
if rc:
msg = 'Cannot up interface {0}. Err: {1}'.format(
self.iface, err)
raise CommandFailed(msg)
def teardown(self):
if self.is_up is False:
execute(['ip', 'link', 'set', 'dev', self.iface, 'down'])
class Vlan(Eth):
def __init__(self, iface, vlan):
self.parent = iface
self.vlan = str(vlan)
self.iface = '{0}.{1}'.format(iface, vlan)
self.is_present = None
self.is_up = None
def setup(self):
self.is_present = check_exist(self.iface)
if self.is_present is False:
rc, out, err = execute(
['ip', 'link', 'add',
'link', self.parent, 'name',
self.iface, 'type', 'vlan', 'id', self.vlan])
if rc:
msg = (
'Cannot create tagged interface {0}.'
' With parent {1}. Err: {2}'.format(
self.iface, self.parent, err))
raise CommandFailed(msg)
super(Vlan, self).setup()
def teardown(self):
super(Vlan, self).teardown()
if self.is_present is False:
execute(['ip', 'link', 'delete', self.iface])
class IP(object):
def __init__(self, iface, addr):
self.iface = iface
self.addr = addr
self.is_present = None
def setup(self):
self.is_present = check_ifaddress_present(self.iface, self.addr)
if self.is_present is False:
rc, out, err = execute(['ip', 'a', 'add', self.addr,
'dev', self.iface])
if rc:
msg = 'Cannot add address {0} to {1}. Err: {2}'.format(
self.addr, self.iface, err)
raise CommandFailed(msg)
def teardown(self):
if self.is_present is False:
execute(['ip', 'a', 'del', self.addr, 'dev', self.iface])
class Route(object):
def __init__(self, iface, gateway):
self.iface = iface
self.gateway = gateway
self.default_gateway = None
self.df_iface = None
def setup(self):
self.default_gateway, self.df_iface = get_default_gateway()
rc = None
if (self.default_gateway, self.df_iface) == (None, None):
rc, out, err = execute(
['ip', 'ro', 'add',
'default', 'via', self.gateway, 'dev', self.iface])
elif ((self.default_gateway, self.df_iface)
!= (self.gateway, self.iface)):
rc, out, err = execute(
['ip', 'ro', 'change',
'default', 'via', self.gateway, 'dev', self.iface])
if rc:
msg = ('Cannot add default gateway {0} on iface {1}.'
' Err: {2}'.format(self.gateway, self.iface, err))
raise CommandFailed(msg)
def teardown(self):
if (self.default_gateway, self.df_iface) == (None, None):
execute(['ip', 'ro', 'del',
'default', 'via', self.gateway, 'dev', self.iface])
elif ((self.default_gateway, self.df_iface)
!= (self.gateway, self.iface)):
execute(['ip', 'ro', 'change',
'default', 'via', self.default_gateway,
'dev', self.df_iface])
@contextmanager
def manage_network(iface, addr, gateway, vlan=None):
log_network_info('before setup')
actions = [Eth(iface)]
if vlan:
vlan_action = Vlan(iface, vlan)
actions.append(vlan_action)
iface = vlan_action.iface
actions.append(IP(iface, addr))
actions.append(Route(iface, gateway))
executed = []
try:
for a in actions:
a.setup()
executed.append(a)
log_network_info('after setup')
yield
except Exception:
logger.exception('Unexpected failure.')
raise
finally:
for a in reversed(executed):
a.teardown()
log_network_info('after teardown')
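# Illustrative usage sketch (not part of the original module; interface,
# address and gateway values below are hypothetical):
#
#     with manage_network('eth1', '10.20.0.2/24', '10.20.0.1', vlan=101):
#         pass  # run URL reachability checks with the temporary config
#
# Successfully executed setup actions are torn down in reverse order, even
# if the body raises.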
|
IronLanguages/ironpython3
|
refs/heads/master
|
Src/StdLib/Lib/test/test_grammar.py
|
1
|
# Python test set -- part 1, grammar.
# This just tests whether the parser accepts them all.
from test.support import check_syntax_error
import inspect
import unittest
import sys
# testing import *
from sys import *
class TokenTests(unittest.TestCase):
def test_backslash(self):
# Backslash means line continuation:
x = 1 \
+ 1
self.assertEqual(x, 2, 'backslash for line continuation')
        # Backslash does not mean continuation in comments :\
x = 0
self.assertEqual(x, 0, 'backslash ending comment')
def test_plain_integers(self):
self.assertEqual(type(000), type(0))
self.assertEqual(0xff, 255)
self.assertEqual(0o377, 255)
self.assertEqual(2147483647, 0o17777777777)
self.assertEqual(0b1001, 9)
# "0x" is not a valid literal
self.assertRaises(SyntaxError, eval, "0x")
from sys import maxsize
if maxsize == 2147483647:
self.assertEqual(-2147483647-1, -0o20000000000)
# XXX -2147483648
self.assertTrue(0o37777777777 > 0)
self.assertTrue(0xffffffff > 0)
self.assertTrue(0b1111111111111111111111111111111 > 0)
for s in ('2147483648', '0o40000000000', '0x100000000',
'0b10000000000000000000000000000000'):
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
elif maxsize == 9223372036854775807:
self.assertEqual(-9223372036854775807-1, -0o1000000000000000000000)
self.assertTrue(0o1777777777777777777777 > 0)
self.assertTrue(0xffffffffffffffff > 0)
self.assertTrue(0b11111111111111111111111111111111111111111111111111111111111111 > 0)
for s in '9223372036854775808', '0o2000000000000000000000', \
'0x10000000000000000', \
'0b100000000000000000000000000000000000000000000000000000000000000':
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
else:
self.fail('Weird maxsize value %r' % maxsize)
def test_long_integers(self):
x = 0
x = 0xffffffffffffffff
x = 0Xffffffffffffffff
x = 0o77777777777777777
x = 0O77777777777777777
x = 123456789012345678901234567890
x = 0b100000000000000000000000000000000000000000000000000000000000000000000
x = 0B111111111111111111111111111111111111111111111111111111111111111111111
def test_floats(self):
x = 3.14
x = 314.
x = 0.314
# XXX x = 000.314
x = .314
x = 3e14
x = 3E14
x = 3e-14
x = 3e+14
x = 3.e14
x = .3e14
x = 3.1e4
def test_float_exponent_tokenization(self):
# See issue 21642.
self.assertEqual(1 if 1else 0, 1)
self.assertEqual(1 if 0else 0, 0)
self.assertRaises(SyntaxError, eval, "0 if 1Else 0")
def test_string_literals(self):
x = ''; y = ""; self.assertTrue(len(x) == 0 and x == y)
x = '\''; y = "'"; self.assertTrue(len(x) == 1 and x == y and ord(x) == 39)
x = '"'; y = "\""; self.assertTrue(len(x) == 1 and x == y and ord(x) == 34)
x = "doesn't \"shrink\" does it"
y = 'doesn\'t "shrink" does it'
self.assertTrue(len(x) == 24 and x == y)
x = "does \"shrink\" doesn't it"
y = 'does "shrink" doesn\'t it'
self.assertTrue(len(x) == 24 and x == y)
x = """
The "quick"
brown fox
jumps over
the 'lazy' dog.
"""
y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
self.assertEqual(x, y)
y = '''
The "quick"
brown fox
jumps over
the 'lazy' dog.
'''
self.assertEqual(x, y)
y = "\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the 'lazy' dog.\n\
"
self.assertEqual(x, y)
y = '\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the \'lazy\' dog.\n\
'
self.assertEqual(x, y)
def test_ellipsis(self):
x = ...
self.assertTrue(x is Ellipsis)
self.assertRaises(SyntaxError, eval, ".. .")
def test_eof_error(self):
samples = ("def foo(", "\ndef foo(", "def foo(\n")
for s in samples:
with self.assertRaises(SyntaxError) as cm:
compile(s, "<test>", "exec")
self.assertIn("unexpected EOF", str(cm.exception))
class GrammarTests(unittest.TestCase):
# single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
# XXX can't test in a script -- this rule is only used when interactive
# file_input: (NEWLINE | stmt)* ENDMARKER
    # Being tested at this very moment by this very module
# expr_input: testlist NEWLINE
# XXX Hard to test -- used only in calls to input()
def test_eval_input(self):
# testlist ENDMARKER
x = eval('1, 0 or 1')
def test_funcdef(self):
### [decorators] 'def' NAME parameters ['->' test] ':' suite
### decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
### decorators: decorator+
### parameters: '(' [typedargslist] ')'
### typedargslist: ((tfpdef ['=' test] ',')*
### ('*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef)
### | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
### tfpdef: NAME [':' test]
### varargslist: ((vfpdef ['=' test] ',')*
### ('*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef)
### | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
### vfpdef: NAME
def f1(): pass
f1()
f1(*())
f1(*(), **{})
def f2(one_argument): pass
def f3(two, arguments): pass
self.assertEqual(f2.__code__.co_varnames, ('one_argument',))
self.assertEqual(f3.__code__.co_varnames, ('two', 'arguments'))
def a1(one_arg,): pass
def a2(two, args,): pass
def v0(*rest): pass
def v1(a, *rest): pass
def v2(a, b, *rest): pass
f1()
f2(1)
f2(1,)
f3(1, 2)
f3(1, 2,)
v0()
v0(1)
v0(1,)
v0(1,2)
v0(1,2,3,4,5,6,7,8,9,0)
v1(1)
v1(1,)
v1(1,2)
v1(1,2,3)
v1(1,2,3,4,5,6,7,8,9,0)
v2(1,2)
v2(1,2,3)
v2(1,2,3,4)
v2(1,2,3,4,5,6,7,8,9,0)
def d01(a=1): pass
d01()
d01(1)
d01(*(1,))
d01(*[] or [2])
d01(*() or (), *{} and (), **() or {})
d01(**{'a':2})
d01(**{'a':2} or {})
def d11(a, b=1): pass
d11(1)
d11(1, 2)
d11(1, **{'b':2})
def d21(a, b, c=1): pass
d21(1, 2)
d21(1, 2, 3)
d21(*(1, 2, 3))
d21(1, *(2, 3))
d21(1, 2, *(3,))
d21(1, 2, **{'c':3})
def d02(a=1, b=2): pass
d02()
d02(1)
d02(1, 2)
d02(*(1, 2))
d02(1, *(2,))
d02(1, **{'b':2})
d02(**{'a': 1, 'b': 2})
def d12(a, b=1, c=2): pass
d12(1)
d12(1, 2)
d12(1, 2, 3)
def d22(a, b, c=1, d=2): pass
d22(1, 2)
d22(1, 2, 3)
d22(1, 2, 3, 4)
def d01v(a=1, *rest): pass
d01v()
d01v(1)
d01v(1, 2)
d01v(*(1, 2, 3, 4))
d01v(*(1,))
d01v(**{'a':2})
def d11v(a, b=1, *rest): pass
d11v(1)
d11v(1, 2)
d11v(1, 2, 3)
def d21v(a, b, c=1, *rest): pass
d21v(1, 2)
d21v(1, 2, 3)
d21v(1, 2, 3, 4)
d21v(*(1, 2, 3, 4))
d21v(1, 2, **{'c': 3})
def d02v(a=1, b=2, *rest): pass
d02v()
d02v(1)
d02v(1, 2)
d02v(1, 2, 3)
d02v(1, *(2, 3, 4))
d02v(**{'a': 1, 'b': 2})
def d12v(a, b=1, c=2, *rest): pass
d12v(1)
d12v(1, 2)
d12v(1, 2, 3)
d12v(1, 2, 3, 4)
d12v(*(1, 2, 3, 4))
d12v(1, 2, *(3, 4, 5))
d12v(1, *(2,), **{'c': 3})
def d22v(a, b, c=1, d=2, *rest): pass
d22v(1, 2)
d22v(1, 2, 3)
d22v(1, 2, 3, 4)
d22v(1, 2, 3, 4, 5)
d22v(*(1, 2, 3, 4))
d22v(1, 2, *(3, 4, 5))
d22v(1, *(2, 3), **{'d': 4})
# keyword argument type tests
try:
str('x', **{b'foo':1 })
except TypeError:
pass
else:
self.fail('Bytes should not work as keyword argument names')
# keyword only argument tests
def pos0key1(*, key): return key
pos0key1(key=100)
def pos2key2(p1, p2, *, k1, k2=100): return p1,p2,k1,k2
pos2key2(1, 2, k1=100)
pos2key2(1, 2, k1=100, k2=200)
pos2key2(1, 2, k2=100, k1=200)
def pos2key2dict(p1, p2, *, k1=100, k2, **kwarg): return p1,p2,k1,k2,kwarg
pos2key2dict(1,2,k2=100,tokwarg1=100,tokwarg2=200)
pos2key2dict(1,2,tokwarg1=100,tokwarg2=200, k2=100)
# keyword arguments after *arglist
def f(*args, **kwargs):
return args, kwargs
self.assertEqual(f(1, x=2, *[3, 4], y=5), ((1, 3, 4),
{'x':2, 'y':5}))
self.assertEqual(f(1, *(2,3), 4), ((1, 2, 3, 4), {}))
self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)")
self.assertEqual(f(**{'eggs':'scrambled', 'spam':'fried'}),
((), {'eggs':'scrambled', 'spam':'fried'}))
self.assertEqual(f(spam='fried', **{'eggs':'scrambled'}),
((), {'eggs':'scrambled', 'spam':'fried'}))
# argument annotation tests
def f(x) -> list: pass
self.assertEqual(f.__annotations__, {'return': list})
def f(x: int): pass
self.assertEqual(f.__annotations__, {'x': int})
def f(*x: str): pass
self.assertEqual(f.__annotations__, {'x': str})
def f(**x: float): pass
self.assertEqual(f.__annotations__, {'x': float})
def f(x, y: 1+2): pass
self.assertEqual(f.__annotations__, {'y': 3})
def f(a, b: 1, c: 2, d): pass
self.assertEqual(f.__annotations__, {'b': 1, 'c': 2})
def f(a, b: 1, c: 2, d, e: 3 = 4, f=5, *g: 6): pass
self.assertEqual(f.__annotations__,
{'b': 1, 'c': 2, 'e': 3, 'g': 6})
def f(a, b: 1, c: 2, d, e: 3 = 4, f=5, *g: 6, h: 7, i=8, j: 9 = 10,
**k: 11) -> 12: pass
self.assertEqual(f.__annotations__,
{'b': 1, 'c': 2, 'e': 3, 'g': 6, 'h': 7, 'j': 9,
'k': 11, 'return': 12})
# Check for issue #20625 -- annotations mangling
class Spam:
def f(self, *, __kw: 1):
pass
class Ham(Spam): pass
self.assertEqual(Spam.f.__annotations__, {'_Spam__kw': 1})
self.assertEqual(Ham.f.__annotations__, {'_Spam__kw': 1})
# Check for SF Bug #1697248 - mixing decorators and a return annotation
def null(x): return x
@null
def f(x) -> list: pass
self.assertEqual(f.__annotations__, {'return': list})
# test MAKE_CLOSURE with a variety of oparg's
closure = 1
def f(): return closure
def f(x=1): return closure
def f(*, k=1): return closure
def f() -> int: return closure
# Check ast errors in *args and *kwargs
check_syntax_error(self, "f(*g(1=2))")
check_syntax_error(self, "f(**g(1=2))")
def test_lambdef(self):
### lambdef: 'lambda' [varargslist] ':' test
l1 = lambda : 0
self.assertEqual(l1(), 0)
l2 = lambda : a[d] # XXX just testing the expression
l3 = lambda : [2 < x for x in [-1, 3, 0]]
self.assertEqual(l3(), [0, 1, 0])
l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
self.assertEqual(l4(), 1)
l5 = lambda x, y, z=2: x + y + z
self.assertEqual(l5(1, 2), 5)
self.assertEqual(l5(1, 2, 3), 6)
check_syntax_error(self, "lambda x: x = 2")
check_syntax_error(self, "lambda (None,): None")
l6 = lambda x, y, *, k=20: x+y+k
self.assertEqual(l6(1,2), 1+2+20)
self.assertEqual(l6(1,2,k=10), 1+2+10)
### stmt: simple_stmt | compound_stmt
# Tested below
def test_simple_stmt(self):
### simple_stmt: small_stmt (';' small_stmt)* [';']
x = 1; pass; del x
def foo():
# verify statements that end with semi-colons
x = 1; pass; del x;
foo()
### small_stmt: expr_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt
# Tested below
def test_expr_stmt(self):
# (exprlist '=')* exprlist
1
1, 2, 3
x = 1
x = 1, 2, 3
x = y = z = 1, 2, 3
x, y, z = 1, 2, 3
abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
check_syntax_error(self, "x + 1 = 1")
check_syntax_error(self, "a + 1 = b + 2")
# Check the heuristic for print & exec covers significant cases
# As well as placing some limits on false positives
@unittest.skipIf(sys.implementation.name=='ironpython', 'https://github.com/IronLanguages/ironpython3/issues/374')
def test_former_statements_refer_to_builtins(self):
keywords = "print", "exec"
# Cases where we want the custom error
cases = [
"{} foo",
"{} {{1:foo}}",
"if 1: {} foo",
"if 1: {} {{1:foo}}",
"if 1:\n {} foo",
"if 1:\n {} {{1:foo}}",
]
for keyword in keywords:
custom_msg = "call to '{}'".format(keyword)
for case in cases:
source = case.format(keyword)
with self.subTest(source=source):
with self.assertRaisesRegex(SyntaxError, custom_msg):
exec(source)
source = source.replace("foo", "(foo.)")
with self.subTest(source=source):
with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
exec(source)
def test_del_stmt(self):
# 'del' exprlist
abc = [1,2,3]
x, y, z = abc
xyz = x, y, z
del abc
del x, y, (z, xyz)
def test_pass_stmt(self):
# 'pass'
pass
# flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
# Tested below
def test_break_stmt(self):
# 'break'
while 1: break
def test_continue_stmt(self):
# 'continue'
i = 1
while i: i = 0; continue
msg = ""
while not msg:
msg = "ok"
try:
continue
msg = "continue failed to continue inside try"
except:
msg = "continue inside try called except block"
if msg != "ok":
self.fail(msg)
msg = ""
while not msg:
msg = "finally block not called"
try:
continue
finally:
msg = "ok"
if msg != "ok":
self.fail(msg)
def test_break_continue_loop(self):
# This test warrants an explanation. It is a test specifically for SF bugs
# #463359 and #462937. The bug is that a 'break' statement executed or
# exception raised inside a try/except inside a loop, *after* a continue
# statement has been executed in that loop, will cause the wrong number of
# arguments to be popped off the stack and the instruction pointer reset to
# a very small number (usually 0.) Because of this, the following test
        # *must* be written as a function, and the tracking vars *must* be function
# arguments with default values. Otherwise, the test will loop and loop.
def test_inner(extra_burning_oil = 1, count=0):
big_hippo = 2
while big_hippo:
count += 1
try:
if extra_burning_oil and big_hippo == 1:
extra_burning_oil -= 1
break
big_hippo -= 1
continue
except:
raise
if count > 2 or big_hippo != 1:
self.fail("continue then break in try/except in loop broken!")
test_inner()
def test_return(self):
# 'return' [testlist]
def g1(): return
def g2(): return 1
g1()
x = g2()
check_syntax_error(self, "class foo:return 1")
def test_yield(self):
# Allowed as standalone statement
def g(): yield 1
def g(): yield from ()
# Allowed as RHS of assignment
def g(): x = yield 1
def g(): x = yield from ()
# Ordinary yield accepts implicit tuples
def g(): yield 1, 1
def g(): x = yield 1, 1
# 'yield from' does not
check_syntax_error(self, "def g(): yield from (), 1")
check_syntax_error(self, "def g(): x = yield from (), 1")
# Requires parentheses as subexpression
def g(): 1, (yield 1)
def g(): 1, (yield from ())
check_syntax_error(self, "def g(): 1, yield 1")
check_syntax_error(self, "def g(): 1, yield from ()")
# Requires parentheses as call argument
def g(): f((yield 1))
def g(): f((yield 1), 1)
def g(): f((yield from ()))
def g(): f((yield from ()), 1)
check_syntax_error(self, "def g(): f(yield 1)")
check_syntax_error(self, "def g(): f(yield 1, 1)")
check_syntax_error(self, "def g(): f(yield from ())")
check_syntax_error(self, "def g(): f(yield from (), 1)")
# Not allowed at top level
check_syntax_error(self, "yield")
check_syntax_error(self, "yield from")
# Not allowed at class scope
check_syntax_error(self, "class foo:yield 1")
check_syntax_error(self, "class foo:yield from ()")
def test_raise(self):
# 'raise' test [',' test]
try: raise RuntimeError('just testing')
except RuntimeError: pass
try: raise KeyboardInterrupt
except KeyboardInterrupt: pass
def test_import(self):
# 'import' dotted_as_names
import sys
import time, sys
# 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names)
from time import time
from time import (time)
# not testable inside a function, but already done at top of the module
# from sys import *
from sys import path, argv
from sys import (path, argv)
from sys import (path, argv,)
def test_global(self):
# 'global' NAME (',' NAME)*
global a
global a, b
global one, two, three, four, five, six, seven, eight, nine, ten
def test_nonlocal(self):
# 'nonlocal' NAME (',' NAME)*
x = 0
y = 0
def f():
nonlocal x
nonlocal x, y
def test_assert(self):
        # assert_stmt: 'assert' test [',' test]
assert 1
assert 1, 1
assert lambda x:x
assert 1, lambda x:x+1
try:
assert True
except AssertionError as e:
self.fail("'assert True' should not have raised an AssertionError")
try:
assert True, 'this should always pass'
except AssertionError as e:
self.fail("'assert True, msg' should not have "
"raised an AssertionError")
# these tests fail if python is run with -O, so check __debug__
@unittest.skipUnless(__debug__, "Won't work if __debug__ is False")
def testAssert2(self):
try:
assert 0, "msg"
except AssertionError as e:
self.assertEqual(e.args[0], "msg")
else:
self.fail("AssertionError not raised by assert 0")
try:
assert False
except AssertionError as e:
self.assertEqual(len(e.args), 0)
else:
self.fail("AssertionError not raised by 'assert False'")
### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
# Tested below
def test_if(self):
# 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
if 1: pass
if 1: pass
else: pass
if 0: pass
elif 0: pass
if 0: pass
elif 0: pass
elif 0: pass
elif 0: pass
else: pass
def test_while(self):
# 'while' test ':' suite ['else' ':' suite]
while 0: pass
while 0: pass
else: pass
# Issue1920: "while 0" is optimized away,
# ensure that the "else" clause is still present.
x = 0
while 0:
x = 1
else:
x = 2
self.assertEqual(x, 2)
def test_for(self):
# 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
for i in 1, 2, 3: pass
for i, j, k in (): pass
else: pass
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self): return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n = n+1
return self.sofar[i]
n = 0
for x in Squares(10): n = n+x
if n != 285:
self.fail('for over growing sequence')
result = []
for x, in [(1,), (2,), (3,)]:
result.append(x)
self.assertEqual(result, [1, 2, 3])
def test_try(self):
### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
### | 'try' ':' suite 'finally' ':' suite
### except_clause: 'except' [expr ['as' expr]]
try:
1/0
except ZeroDivisionError:
pass
else:
pass
try: 1/0
except EOFError: pass
except TypeError as msg: pass
except RuntimeError as msg: pass
except: pass
else: pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError): pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError) as msg: pass
try: pass
finally: pass
def test_suite(self):
# simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
if 1: pass
if 1:
pass
if 1:
#
#
#
pass
pass
#
pass
#
def test_test(self):
### and_test ('or' and_test)*
### and_test: not_test ('and' not_test)*
### not_test: 'not' not_test | comparison
if not 1: pass
if 1 and 1: pass
if 1 or 1: pass
if not not not 1: pass
if not 1 and 1 and 1: pass
if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
def test_comparison(self):
### comparison: expr (comp_op expr)*
### comp_op: '<'|'>'|'=='|'>='|'<='|'!='|'in'|'not' 'in'|'is'|'is' 'not'
if 1: pass
x = (1 == 1)
if 1 == 1: pass
if 1 != 1: pass
if 1 < 1: pass
if 1 > 1: pass
if 1 <= 1: pass
if 1 >= 1: pass
if 1 is 1: pass
if 1 is not 1: pass
if 1 in (): pass
if 1 not in (): pass
if 1 < 1 > 1 == 1 >= 1 <= 1 != 1 in 1 not in 1 is 1 is not 1: pass
def test_binary_mask_ops(self):
x = 1 & 1
x = 1 ^ 1
x = 1 | 1
def test_shift_ops(self):
x = 1 << 1
x = 1 >> 1
x = 1 << 1 >> 1
def test_additive_ops(self):
x = 1
x = 1 + 1
x = 1 - 1 - 1
x = 1 - 1 + 1 - 1 + 1
def test_multiplicative_ops(self):
x = 1 * 1
x = 1 / 1
x = 1 % 1
x = 1 / 1 * 1 % 1
def test_unary_ops(self):
x = +1
x = -1
x = ~1
x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
x = -1*1/1 + 1*1 - ---1*1
def test_selectors(self):
### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
### subscript: expr | [expr] ':' [expr]
import sys, time
c = sys.path[0]
x = time.time()
x = sys.modules['time'].time()
a = '01234'
c = a[0]
c = a[-1]
s = a[0:5]
s = a[:5]
s = a[0:]
s = a[:]
s = a[-5:]
s = a[:-1]
s = a[-4:-3]
# A rough test of SF bug 1333982. http://python.org/sf/1333982
# The testing here is fairly incomplete.
# Test cases should include: commas with 1 and 2 colons
d = {}
d[1] = 1
d[1,] = 2
d[1,2] = 3
d[1,2,3] = 4
L = list(d)
L.sort(key=lambda x: x if isinstance(x, tuple) else ())
self.assertEqual(str(L), '[1, (1,), (1, 2), (1, 2, 3)]')
def test_atoms(self):
### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictsetmaker] '}' | NAME | NUMBER | STRING
### dictsetmaker: (test ':' test (',' test ':' test)* [',']) | (test (',' test)* [','])
x = (1)
x = (1 or 2 or 3)
x = (1 or 2 or 3, 2, 3)
x = []
x = [1]
x = [1 or 2 or 3]
x = [1 or 2 or 3, 2, 3]
x = []
x = {}
x = {'one': 1}
x = {'one': 1,}
x = {'one' or 'two': 1 or 2}
x = {'one': 1, 'two': 2}
x = {'one': 1, 'two': 2,}
x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
x = {'one'}
x = {'one', 1,}
x = {'one', 'two', 'three'}
x = {2, 3, 4,}
x = x
x = 'x'
x = 123
### exprlist: expr (',' expr)* [',']
### testlist: test (',' test)* [',']
# These have been exercised enough above
def test_classdef(self):
# 'class' NAME ['(' [testlist] ')'] ':' suite
class B: pass
class B2(): pass
class C1(B): pass
class C2(B): pass
class D(C1, C2, B): pass
class C:
def meth1(self): pass
def meth2(self, arg): pass
def meth3(self, a1, a2): pass
# decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
# decorators: decorator+
# decorated: decorators (classdef | funcdef)
def class_decorator(x): return x
@class_decorator
class G: pass
def test_dictcomps(self):
# dictorsetmaker: ( (test ':' test (comp_for |
# (',' test ':' test)* [','])) |
# (test (comp_for | (',' test)* [','])) )
nums = [1, 2, 3]
self.assertEqual({i:i+1 for i in nums}, {1: 2, 2: 3, 3: 4})
def test_listcomps(self):
# list comprehension tests
nums = [1, 2, 3, 4, 5]
strs = ["Apple", "Banana", "Coconut"]
spcs = [" Apple", " Banana ", "Coco nut "]
self.assertEqual([s.strip() for s in spcs], ['Apple', 'Banana', 'Coco nut'])
self.assertEqual([3 * x for x in nums], [3, 6, 9, 12, 15])
self.assertEqual([x for x in nums if x > 2], [3, 4, 5])
self.assertEqual([(i, s) for i in nums for s in strs],
[(1, 'Apple'), (1, 'Banana'), (1, 'Coconut'),
(2, 'Apple'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Apple'), (3, 'Banana'), (3, 'Coconut'),
(4, 'Apple'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Apple'), (5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(i, s) for i in nums for s in [f for f in strs if "n" in f]],
[(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)],
[[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]])
def test_in_func(l):
return [0 < x < 3 for x in l if x > 2]
self.assertEqual(test_in_func(nums), [False, False, False])
def test_nested_front():
self.assertEqual([[y for y in [x, x + 1]] for x in [1,3,5]],
[[1, 2], [3, 4], [5, 6]])
test_nested_front()
check_syntax_error(self, "[i, s for i in nums for s in strs]")
check_syntax_error(self, "[x if y]")
suppliers = [
(1, "Boeing"),
(2, "Ford"),
(3, "Macdonalds")
]
parts = [
(10, "Airliner"),
(20, "Engine"),
(30, "Cheeseburger")
]
suppart = [
(1, 10), (1, 20), (2, 20), (3, 30)
]
x = [
(sname, pname)
for (sno, sname) in suppliers
for (pno, pname) in parts
for (sp_sno, sp_pno) in suppart
if sno == sp_sno and pno == sp_pno
]
self.assertEqual(x, [('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'),
('Macdonalds', 'Cheeseburger')])
def test_genexps(self):
# generator expression tests
g = ([x for x in range(10)] for x in range(1))
self.assertEqual(next(g), [x for x in range(10)])
try:
next(g)
self.fail('should produce StopIteration exception')
except StopIteration:
pass
a = 1
try:
g = (a for d in a)
next(g)
self.fail('should produce TypeError')
except TypeError:
pass
self.assertEqual(list((x, y) for x in 'abcd' for y in 'abcd'), [(x, y) for x in 'abcd' for y in 'abcd'])
self.assertEqual(list((x, y) for x in 'ab' for y in 'xy'), [(x, y) for x in 'ab' for y in 'xy'])
a = [x for x in range(10)]
b = (x for x in (y for y in a))
self.assertEqual(sum(b), sum([x for x in range(10)]))
self.assertEqual(sum(x**2 for x in range(10)), sum([x**2 for x in range(10)]))
self.assertEqual(sum(x*x for x in range(10) if x%2), sum([x*x for x in range(10) if x%2]))
self.assertEqual(sum(x for x in (y for y in range(10))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10)))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in [y for y in (z for z in range(10))]), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True)) if True), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True), 0)
check_syntax_error(self, "foo(x for x in range(10), 100)")
check_syntax_error(self, "foo(100, x for x in range(10))")
def test_comprehension_specials(self):
        # test for outermost iterable precomputation
x = 10; g = (i for i in range(x)); x = 5
self.assertEqual(len(list(g)), 10)
        # This should hold, since we're only precomputing the outermost iterable.
x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
x = 5; t = True;
self.assertEqual([(i,j) for i in range(10) for j in range(5)], list(g))
# Grammar allows multiple adjacent 'if's in listcomps and genexps,
# even though it's silly. Make sure it works (ifelse broke this.)
self.assertEqual([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7])
self.assertEqual(list(x for x in range(10) if x % 2 if x % 3), [1, 5, 7])
# verify unpacking single element tuples in listcomp/genexp.
self.assertEqual([x for x, in [(4,), (5,), (6,)]], [4, 5, 6])
self.assertEqual(list(x for x, in [(7,), (8,), (9,)]), [7, 8, 9])
def test_with_statement(self):
class manager(object):
def __enter__(self):
return (1, 2)
def __exit__(self, *args):
pass
with manager():
pass
with manager() as x:
pass
with manager() as (x, y):
pass
with manager(), manager():
pass
with manager() as x, manager() as y:
pass
with manager() as x, manager():
pass
def test_if_else_expr(self):
# Test ifelse expressions in various cases
def _checkeval(msg, ret):
"helper to check that evaluation of expressions is done correctly"
print(msg)
return ret
# the next line is not allowed anymore
#self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True])
self.assertEqual([ x() for x in (lambda: True, lambda: False) if x() ], [True])
self.assertEqual([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ], [True])
self.assertEqual((5 if 1 else _checkeval("check 1", 0)), 5)
self.assertEqual((_checkeval("check 2", 0) if 0 else 5), 5)
self.assertEqual((5 and 6 if 0 else 1), 1)
self.assertEqual(((5 and 6) if 0 else 1), 1)
self.assertEqual((5 and (6 if 1 else 1)), 6)
self.assertEqual((0 or _checkeval("check 3", 2) if 0 else 3), 3)
self.assertEqual((1 or _checkeval("check 4", 2) if 1 else _checkeval("check 5", 3)), 1)
self.assertEqual((0 or 5 if 1 else _checkeval("check 6", 3)), 5)
self.assertEqual((not 5 if 1 else 1), False)
self.assertEqual((not 5 if 0 else 1), 1)
self.assertEqual((6 + 1 if 1 else 2), 7)
self.assertEqual((6 - 1 if 1 else 2), 5)
self.assertEqual((6 * 2 if 1 else 4), 12)
self.assertEqual((6 / 2 if 1 else 3), 3)
self.assertEqual((6 < 4 if 0 else 2), 2)
def test_paren_evaluation(self):
self.assertEqual(16 // (4 // 2), 8)
self.assertEqual((16 // 4) // 2, 2)
self.assertEqual(16 // 4 // 2, 2)
self.assertTrue(False is (2 is 3))
self.assertFalse((False is 2) is 3)
self.assertFalse(False is 2 is 3)
def test_matrix_mul(self):
        # This is not intended to be a comprehensive test, rather just a few
# samples of the @ operator in test_grammar.py.
class M:
def __matmul__(self, o):
return 4
def __imatmul__(self, o):
self.other = o
return self
m = M()
self.assertEqual(m @ m, 4)
m @= 42
self.assertEqual(m.other, 42)
if __name__ == '__main__':
unittest.main()
|
tobegit3hub/keystone_docker
|
refs/heads/master
|
keystone/tests/unit/filtering.py
|
10
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_config import cfg
from six.moves import range
CONF = cfg.CONF
class FilterTests(object):
# Provide support for checking if a batch of list items all
# exist within a contiguous range in a total list
def _match_with_list(self, this_batch, total_list,
batch_size=None,
list_start=None, list_end=None):
if batch_size is None:
batch_size = len(this_batch)
if list_start is None:
list_start = 0
if list_end is None:
list_end = len(total_list)
for batch_item in range(0, batch_size):
found = False
for list_item in range(list_start, list_end):
if this_batch[batch_item]['id'] == total_list[list_item]['id']:
found = True
self.assertTrue(found)
def _create_entity(self, entity_type):
"""Find the create_<entity_type> method.
Searches through the [identity_api, resource_api, assignment_api]
managers for a method called create_<entity_type> and returns the first
one.
"""
f = getattr(self.identity_api, 'create_%s' % entity_type, None)
if f is None:
f = getattr(self.resource_api, 'create_%s' % entity_type, None)
if f is None:
f = getattr(self.assignment_api, 'create_%s' % entity_type)
return f
def _delete_entity(self, entity_type):
"""Find the delete_<entity_type> method.
Searches through the [identity_api, resource_api, assignment_api]
managers for a method called delete_<entity_type> and returns the first
one.
"""
f = getattr(self.identity_api, 'delete_%s' % entity_type, None)
if f is None:
f = getattr(self.resource_api, 'delete_%s' % entity_type, None)
if f is None:
f = getattr(self.assignment_api, 'delete_%s' % entity_type)
return f
def _list_entities(self, entity_type):
"""Find the list_<entity_type> method.
Searches through the [identity_api, resource_api, assignment_api]
managers for a method called list_<entity_type> and returns the first
one.
"""
f = getattr(self.identity_api, 'list_%ss' % entity_type, None)
if f is None:
f = getattr(self.resource_api, 'list_%ss' % entity_type, None)
if f is None:
f = getattr(self.assignment_api, 'list_%ss' % entity_type)
return f
def _create_one_entity(self, entity_type, domain_id, name):
new_entity = {'name': name,
'domain_id': domain_id}
if entity_type in ['user', 'group']:
# The manager layer creates the ID for users and groups
new_entity = self._create_entity(entity_type)(new_entity)
else:
new_entity['id'] = '0000' + uuid.uuid4().hex
self._create_entity(entity_type)(new_entity['id'], new_entity)
return new_entity
def _create_test_data(self, entity_type, number, domain_id=None,
name_dict=None):
"""Create entity test data
:param entity_type: type of entity to create, e.g. 'user', group' etc.
:param number: number of entities to create,
:param domain_id: if not defined, all users will be created in the
default domain.
:param name_dict: optional dict containing entity number and name pairs
"""
entity_list = []
if domain_id is None:
domain_id = CONF.identity.default_domain_id
name_dict = name_dict or {}
for x in range(number):
# If this index has a name defined in the name_dict, then use it
name = name_dict.get(x, uuid.uuid4().hex)
new_entity = self._create_one_entity(entity_type, domain_id, name)
entity_list.append(new_entity)
return entity_list
def _delete_test_data(self, entity_type, entity_list):
for entity in entity_list:
self._delete_entity(entity_type)(entity['id'])
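# Illustrative usage from a test class that mixes in FilterTests (the
# *_api managers are assumed to be provided by that class):
#
#     entities = self._create_test_data('user', 3, name_dict={0: 'alice'})
#     self._match_with_list(entities[:2], entities, list_start=0, list_end=2)
#     self._delete_test_data('user', entities)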
|
dmerejkowsky/qibuild
|
refs/heads/master
|
python/qisys/test/test_ui.py
|
1
|
## Copyright (c) 2012-2015 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.
""" Just some tests for ui """
import qisys.ui as ui
import pytest
def main():
ui.info(ui.red, "This is a an error message\n",
ui.reset, "And here are the details")
ui.error("could not build")
ui.warning("-j ignored for this generator")
ui.info("building foo")
ui.debug("debug message")
ui.info(ui.brown, "this is brown")
ui.info(ui.bold, ui.brown, "this is bold brown")
ui.info(ui.red, "red is dead")
ui.info(ui.darkred, "darkred is really dead")
ui.info(ui.yellow, "this is yellow")
def test_valid_filename():
# pylint:disable-msg=E1101
with pytest.raises(Exception):
ui.valid_filename("foo/bar")
# pylint:disable-msg=E1101
with pytest.raises(Exception):
ui.valid_filename("..")
ui.valid_filename("foo")
def test_empty_end():
ui.info("[skipped]", end="")
ui.info("Your branch has diverged")
if __name__ == "__main__":
import sys
if "-v" in sys.argv:
ui.CONFIG["verbose"] = True
if "-q" in sys.argv:
ui.CONFIG["quiet"] = True
main()
|
minhlongdo/scipy
|
refs/heads/master
|
benchmarks/benchmarks/linalg_solve_toeplitz.py
|
106
|
"""Benchmark the solve_toeplitz solver (Levinson recursion)
"""
from __future__ import division, absolute_import, print_function
import numpy as np
try:
import scipy.linalg
except ImportError:
pass
from .common import Benchmark
class SolveToeplitz(Benchmark):
params = (
('float64', 'complex128'),
(100, 300, 1000),
('toeplitz', 'generic')
)
param_names = ('dtype', 'n', 'solver')
def setup(self, dtype, n, soltype):
random = np.random.RandomState(1234)
dtype = np.dtype(dtype)
# Sample a random Toeplitz matrix representation and rhs.
c = random.randn(n)
r = random.randn(n)
y = random.randn(n)
if dtype == np.complex128:
c = c + 1j*random.rand(n)
r = r + 1j*random.rand(n)
y = y + 1j*random.rand(n)
self.c = c
self.r = r
self.y = y
self.T = scipy.linalg.toeplitz(c, r=r)
def time_solve_toeplitz(self, dtype, n, soltype):
if soltype == 'toeplitz':
scipy.linalg.solve_toeplitz((self.c, self.r), self.y)
else:
scipy.linalg.solve(self.T, self.y)
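# These benchmarks are normally collected by airspeed velocity; a hedged
# invocation sketch (exact flags depend on the local asv setup) would be:
#
#     asv run --bench SolveToeplitz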
|
jopohl/urh
|
refs/heads/master
|
src/urh/simulator/SimulatorCounterAction.py
|
1
|
import xml.etree.ElementTree as ET
from urh.simulator.SimulatorItem import SimulatorItem
from urh.simulator.SimulatorRule import SimulatorRuleCondition
from urh.util.Formatter import Formatter
class SimulatorCounterAction(SimulatorItem):
def __init__(self):
super().__init__()
self.start = 1
self.step = 1
self.__value = self.start
@property
def value(self):
return self.__value
def reset_value(self):
self.__value = self.start
def progress_value(self):
self.__value += self.step
def validate(self):
return True
def set_parent(self, value):
if value is not None:
assert value.parent() is None or isinstance(value, SimulatorRuleCondition)
super().set_parent(value)
def to_xml(self):
attrib = {"start": str(self.start), "step": str(self.step)}
return ET.Element("simulator_counter_action", attrib=attrib)
@classmethod
def from_xml(cls, tag):
result = SimulatorCounterAction()
result.start = Formatter.str2val(tag.get("start", "1"), int, 1)
result.step = Formatter.str2val(tag.get("step", "1"), int, 1)
return result
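# XML round-trip sketch (illustrative; attribute values are arbitrary):
#
#     action = SimulatorCounterAction()
#     action.start, action.step = 5, 2
#     restored = SimulatorCounterAction.from_xml(action.to_xml())
#     assert (restored.start, restored.step) == (5, 2)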
|
khchine5/lino
|
refs/heads/master
|
lino/modlib/weasyprint/__init__.py
|
1
|
# Copyright 2016-2017 Luc Saffre
# License: BSD (see file COPYING for details)
"""This plugins installs two build methods for generating
:doc:`printable documents </admin/printing>` using `weasyprint
<http://weasyprint.org/>`__.
Applications which use this plugin must also add `'weasyprint'` to
their :ref:`install_requires`.
This plugin installs a warnings filter for the `cffi.model` module in
order to get rid of the disturbing warnings :message:`There are known
rendering problems with Cairo <= 1.14.0` and :message:`@font-face
support needs Pango >= 1.38` issued by weasyprint.
.. (Probably obsolete:) They should also add `'cairocffi<0.7'` (see
:ticket:`1119`) or install it using pip::
$ pip install 'cairocffi<0.7' weasyprint
The build methods defined by this plugin both have the same input
template, whose ending must be :xfile:`.weasy.html`. Both methods
then render the input template through Jinja with the standard context
variables (defined by :meth:`get_printable_context
<lino.core.model.Model.get_printable_context>`). The base build method
:class:`WeasyBuildMethod
<lino.modlib.weasyprint.choicelists.WeasyBuildMethod>` then returns
this HTML output "as is", the other method runs weasyprint over the
HTML file to convert it to a :file:`.pdf` file.
.. autosummary::
:toctree:
choicelists
models
"""
# trying to get rid of disturbing warnings in
# https://travis-ci.org/lino-framework/book/jobs/260560833
import warnings
warnings.filterwarnings(
"ignore", 'There are known rendering problems with Cairo <= 1.14.0')
warnings.filterwarnings(
"ignore", '@font-face support needs Pango >= 1.38')
from lino.api import ad, _
class Plugin(ad.Plugin):
"See :doc:`/dev/plugins`."
verbose_name = _("WeasyPrint")
# def on_ui_init(self, kernel):
# """
# This is being called from
# :meth:`lino.core.kernel.Kernel.kernel_startup`.
# Lino has an automatic and currently not configurable method
# for building Jinja's template loader. It looks for
# a "config" subfolder in the following places:
# - the project directory :attr:`lino.core.site.Site.project_dir`
# - the directories of each installed app
# """
# from .renderer import WeasyRenderer
# self.renderer = WeasyRenderer(self)
|
yongshengwang/hue
|
refs/heads/master
|
desktop/libs/libopenid/setup.py
|
29
|
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
from hueversion import VERSION
setup(
name = "libopenid",
version = VERSION,
url = 'http://github.com/cloudera/hue',
description = "OPENID Libraries",
packages = find_packages('src'),
package_dir = {'': 'src' },
install_requires = ['setuptools', 'desktop'],
# Even libraries need to be registered as desktop_apps,
# if they have configuration, like this one.
entry_points = { 'desktop.sdk.lib': 'libopenid=libopenid' },
)
|
abadger/ansible
|
refs/heads/devel
|
test/integration/targets/collections/collection_root_sys/ansible_collections/testns/testcoll/plugins/modules/maskedmodule.py
|
66
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
def main():
print(json.dumps(dict(changed=False, failed=True, msg='this collection should be masked by testcoll in the user content root')))
if __name__ == '__main__':
main()
|
ionutbalutoiu/ironic
|
refs/heads/master
|
ironic/common/context.py
|
11
|
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_context import context
class RequestContext(context.RequestContext):
"""Extends security contexts from the OpenStack common library."""
def __init__(self, auth_token=None, domain_id=None, domain_name=None,
user=None, tenant=None, is_admin=False, is_public_api=False,
read_only=False, show_deleted=False, request_id=None,
roles=None, show_password=True):
"""Stores several additional request parameters:
:param domain_id: The ID of the domain.
:param domain_name: The name of the domain.
:param is_public_api: Specifies whether the request should be processed
without authentication.
:param roles: List of user's roles if any.
        :param show_password: Specifies whether passwords should be masked
            before being sent back in API responses.
"""
self.is_public_api = is_public_api
self.domain_id = domain_id
self.domain_name = domain_name
self.roles = roles or []
self.show_password = show_password
super(RequestContext, self).__init__(auth_token=auth_token,
user=user, tenant=tenant,
is_admin=is_admin,
read_only=read_only,
show_deleted=show_deleted,
request_id=request_id)
def to_dict(self):
return {'auth_token': self.auth_token,
'user': self.user,
'tenant': self.tenant,
'is_admin': self.is_admin,
'read_only': self.read_only,
'show_deleted': self.show_deleted,
'request_id': self.request_id,
'domain_id': self.domain_id,
'roles': self.roles,
'domain_name': self.domain_name,
'show_password': self.show_password,
'is_public_api': self.is_public_api}
@classmethod
def from_dict(cls, values):
values.pop('user', None)
values.pop('tenant', None)
return cls(**values)
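# Serialization round-trip sketch (illustrative values, not part of the
# original module):
#
#     ctxt = RequestContext(auth_token='token', is_public_api=True)
#     restored = RequestContext.from_dict(ctxt.to_dict())
#     assert restored.is_public_api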
|