repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
acsone/odoo | refs/heads/8.0 | addons/email_template/tests/test_mail.py | 190 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from openerp.addons.mail.tests.common import TestMail
from openerp.tools import mute_logger
class test_message_compose(TestMail):
    """Tests for the mail.compose.message wizard as extended by the
    email_template module: template rendering in comment mode,
    save-as-template, mass mailing, and the partner_to template field."""

    def setUp(self):
        super(test_message_compose, self).setUp()
        # create a 'bird' group used through the various tests; the 'pigs'
        # group (self.group_pigs_id) is already provided by TestMail.setUp()
        self.group_bird_id = self.mail_group.create(self.cr, self.uid,
            {'name': 'Bird', 'description': 'I am angry !'})

    def test_00_message_compose_wizard(self):
        """ Tests designed for the mail.compose.message wizard updated by email_template. """
        cr, uid = self.cr, self.uid
        mail_compose = self.registry('mail.compose.message')
        self.res_users.write(cr, uid, [uid], {'signature': 'Admin', 'email': 'a@a.a'})
        user_admin = self.res_users.browse(cr, uid, uid)
        p_a_id = user_admin.partner_id.id
        group_pigs = self.mail_group.browse(cr, uid, self.group_pigs_id)
        group_bird = self.mail_group.browse(cr, uid, self.group_bird_id)

        # Mail data: expected rendered subjects/bodies, plus two attachments
        # deliberately owned by res.partner records (their ownership must not
        # be clobbered by template usage -- checked in CASE2)
        _subject1 = 'Pigs'
        _subject2 = 'Bird'
        _body_html1 = 'Fans of Pigs, unite !'
        _body_html2 = 'I am angry !'
        _attachments = [
            {'name': 'First', 'datas_fname': 'first.txt', 'datas': base64.b64encode('My first attachment'), 'res_model': 'res.partner', 'res_id': self.partner_admin_id},
            {'name': 'Second', 'datas_fname': 'second.txt', 'datas': base64.b64encode('My second attachment'), 'res_model': 'res.partner', 'res_id': self.partner_admin_id},
        ]
        _attachments_test = [('first.txt', 'My first attachment'), ('second.txt', 'My second attachment')]

        # Create template on mail.group, with attachments
        group_model_id = self.registry('ir.model').search(cr, uid, [('model', '=', 'mail.group')])[0]
        email_template = self.registry('email.template')
        email_template_id = email_template.create(cr, uid, {
            'model_id': group_model_id,
            'name': 'Pigs Template',
            'subject': '${object.name}',
            'body_html': '${object.description}',
            'user_signature': False,
            'attachment_ids': [(0, 0, _attachments[0]), (0, 0, _attachments[1])],
            'email_to': 'b@b.b, c@c.c',
            'email_cc': 'd@d.d'
        })

        # ----------------------------------------
        # CASE1: comment and save as template
        # ----------------------------------------

        # 1. Comment on pigs
        compose_id = mail_compose.create(cr, uid,
            {'subject': 'Forget me subject', 'body': '<p>Dummy body</p>'},
            {'default_composition_mode': 'comment',
             'default_model': 'mail.group',
             'default_res_id': self.group_pigs_id,
             'active_ids': [self.group_pigs_id, self.group_bird_id]})
        compose = mail_compose.browse(cr, uid, compose_id)

        # 2. Save current composition form as a template
        mail_compose.save_as_template(cr, uid, [compose_id], context={'default_model': 'mail.group'})
        # Test: email_template subject, body_html, model
        last_template_id = email_template.search(cr, uid, [('model', '=', 'mail.group'), ('subject', '=', 'Forget me subject')], limit=1)[0]
        self.assertTrue(last_template_id, 'email_template not found for model mail.group, subject Forget me subject')
        last_template = email_template.browse(cr, uid, last_template_id)
        self.assertEqual(last_template.body_html, '<p>Dummy body</p>', 'email_template incorrect body_html')

        # ----------------------------------------
        # CASE2: comment with template, save as template
        # ----------------------------------------

        # 1. Comment on pigs
        context = {
            'default_composition_mode': 'comment',
            'default_model': 'mail.group',
            'default_res_id': self.group_pigs_id,
            'default_use_template': False,
            'default_template_id': email_template_id,
            'active_ids': [self.group_pigs_id, self.group_bird_id]
        }
        compose_id = mail_compose.create(cr, uid, {'subject': 'Forget me subject', 'body': 'Dummy body'}, context)
        compose = mail_compose.browse(cr, uid, compose_id, context)
        # Simulate the client-side onchange fired when picking the template:
        # it returns plain id lists that must be converted into (4, id)
        # link commands before being written back on the wizard record.
        onchange_res = compose.onchange_template_id(email_template_id, 'comment', 'mail.group', self.group_pigs_id)['value']
        onchange_res['partner_ids'] = [(4, partner_id) for partner_id in onchange_res.pop('partner_ids', [])]
        onchange_res['attachment_ids'] = [(4, attachment_id) for attachment_id in onchange_res.pop('attachment_ids', [])]
        compose.write(onchange_res)
        compose.refresh()

        message_pids = [partner.id for partner in compose.partner_ids]
        partner_ids = self.res_partner.search(cr, uid, [('email', 'in', ['b@b.b', 'c@c.c', 'd@d.d'])])
        # Test: mail.compose.message: subject, body, partner_ids
        self.assertEqual(compose.subject, _subject1, 'mail.compose.message subject incorrect')
        self.assertIn(_body_html1, compose.body, 'mail.compose.message body incorrect')
        self.assertEqual(set(message_pids), set(partner_ids), 'mail.compose.message partner_ids incorrect')
        # Test: mail.compose.message: attachments (owner has not been modified)
        for attach in compose.attachment_ids:
            self.assertEqual(attach.res_model, 'res.partner', 'mail.compose.message attachment res_model through templat was overriden')
            self.assertEqual(attach.res_id, self.partner_admin_id, 'mail.compose.message attachment res_id incorrect')
            self.assertIn((attach.datas_fname, base64.b64decode(attach.datas)), _attachments_test,
                          'mail.message attachment name / data incorrect')

        # Test: mail.message: attachments of the posted message belong to the group
        mail_compose.send_mail(cr, uid, [compose_id])
        group_pigs.refresh()
        message_pigs = group_pigs.message_ids[0]
        for attach in message_pigs.attachment_ids:
            self.assertEqual(attach.res_model, 'mail.group', 'mail.compose.message attachment res_model through templat was overriden')
            self.assertEqual(attach.res_id, self.group_pigs_id, 'mail.compose.message attachment res_id incorrect')
            self.assertIn((attach.datas_fname, base64.b64decode(attach.datas)), _attachments_test,
                          'mail.message attachment name / data incorrect')

        # ----------------------------------------
        # CASE3: mass_mail with template
        # ----------------------------------------

        # 1. Mass_mail on pigs and bird, with a default_partner_ids set to check he is correctly added
        context = {
            'default_composition_mode': 'mass_mail',
            'default_notify': True,
            'default_model': 'mail.group',
            'default_res_id': self.group_pigs_id,
            'default_template_id': email_template_id,
            'default_partner_ids': [p_a_id],
            'active_ids': [self.group_pigs_id, self.group_bird_id]
        }
        compose_id = mail_compose.create(cr, uid, {'subject': 'Forget me subject', 'body': 'Dummy body'}, context)
        compose = mail_compose.browse(cr, uid, compose_id, context)
        onchange_res = compose.onchange_template_id(email_template_id, 'mass_mail', 'mail.group', self.group_pigs_id)['value']
        onchange_res['partner_ids'] = [(4, partner_id) for partner_id in onchange_res.pop('partner_ids', [])]
        onchange_res['attachment_ids'] = [(4, attachment_id) for attachment_id in onchange_res.pop('attachment_ids', [])]
        compose.write(onchange_res)
        compose.refresh()

        message_pids = [partner.id for partner in compose.partner_ids]
        partner_ids = [p_a_id]
        # In mass mailing mode the template stays un-rendered on the wizard;
        # rendering happens per-record at sending time.
        self.assertEqual(compose.subject, '${object.name}', 'mail.compose.message subject incorrect')
        self.assertEqual(compose.body, '<p>${object.description}</p>', 'mail.compose.message body incorrect')  # todo: check signature
        self.assertEqual(set(message_pids), set(partner_ids), 'mail.compose.message partner_ids incorrect')

        # 2. Post the comment, get created message
        mail_compose.send_mail(cr, uid, [compose_id], {'default_res_id': -1, 'active_ids': [self.group_pigs_id, self.group_bird_id]})
        group_pigs.refresh()
        group_bird.refresh()
        message_pigs = group_pigs.message_ids[0]
        message_bird = group_bird.message_ids[0]
        # Test: subject, body
        self.assertEqual(message_pigs.subject, _subject1, 'mail.message subject on Pigs incorrect')
        self.assertEqual(message_bird.subject, _subject2, 'mail.message subject on Bird incorrect')
        self.assertIn(_body_html1, message_pigs.body, 'mail.message body on Pigs incorrect')
        self.assertIn(_body_html2, message_bird.body, 'mail.message body on Bird incorrect')
        # Test: partner_ids: p_a_id (default) + 3 newly created partners
        # NOTE: the notified_partner_ids checks below are disabled upstream.
        # message_pigs_pids = [partner.id for partner in message_pigs.notified_partner_ids]
        # message_bird_pids = [partner.id for partner in message_bird.notified_partner_ids]
        # partner_ids = self.res_partner.search(cr, uid, [('email', 'in', ['b@b.b', 'c@c.c', 'd@d.d'])])
        # partner_ids.append(p_a_id)
        # self.assertEqual(set(message_pigs_pids), set(partner_ids), 'mail.message on pigs incorrect number of notified_partner_ids')
        # self.assertEqual(set(message_bird_pids), set(partner_ids), 'mail.message on bird notified_partner_ids incorrect')

        # ----------------------------------------
        # CASE4: test newly introduced partner_to field
        # ----------------------------------------

        # get already-created partners back
        p_b_id = self.res_partner.search(cr, uid, [('email', '=', 'b@b.b')])[0]
        p_c_id = self.res_partner.search(cr, uid, [('email', '=', 'c@c.c')])[0]
        p_d_id = self.res_partner.search(cr, uid, [('email', '=', 'd@d.d')])[0]
        # modify template: use partner_to, use template and email address in email_to to test all features together
        user_model_id = self.registry('ir.model').search(cr, uid, [('model', '=', 'res.users')])[0]
        email_template.write(cr, uid, [email_template_id], {
            'model_id': user_model_id,
            'body_html': '${object.login}',
            'email_to': '${object.email}, c@c.c',
            'partner_to': '%i,%i' % (p_b_id, p_c_id),
            'email_cc': 'd@d.d',
        })
        # partner by email + partner by id (no duplicates expected)
        send_to = [p_a_id, p_b_id, p_c_id, p_d_id]
        # Generate message with default email and partner on template
        mail_value = mail_compose.generate_email_for_composer(cr, uid, email_template_id, uid)
        self.assertEqual(set(mail_value['partner_ids']), set(send_to), 'mail.message partner_ids list created by template is incorrect')

    @mute_logger('openerp.models')
    def test_10_email_templating(self):
        """ Tests sending email directly from an email.template: partner_to
        parsing (expression, literal ids, invalid id) and force_send. """
        cr, uid, context = self.cr, self.uid, {}

        # create the email.template on mail.group model
        group_model_id = self.registry('ir.model').search(cr, uid, [('model', '=', 'mail.group')])[0]
        email_template = self.registry('email.template')
        email_template_id = email_template.create(cr, uid, {
            'model_id': group_model_id,
            'name': 'Pigs Template',
            'email_from': 'Raoul Grosbedon <raoul@example.com>',
            'subject': '${object.name}',
            'body_html': '${object.description}',
            'user_signature': True,
            'email_to': 'b@b.b, c@c.c',
            'email_cc': 'd@d.d',
            # partner_to mixes a rendered expression, two literal ids and an
            # invalid id (-1) that must be ignored at send time
            'partner_to': '${user.partner_id.id},%s,%s,-1' % (self.user_raoul.partner_id.id, self.user_bert.partner_id.id)
        })

        # not force send: email_recipients is not taken into account
        msg_id = email_template.send_mail(cr, uid, email_template_id, self.group_pigs_id, context=context)
        mail = self.mail_mail.browse(cr, uid, msg_id, context=context)
        self.assertEqual(mail.subject, 'Pigs', 'email_template: send_mail: wrong subject')
        self.assertEqual(mail.email_to, 'b@b.b, c@c.c', 'email_template: send_mail: wrong email_to')
        self.assertEqual(mail.email_cc, 'd@d.d', 'email_template: send_mail: wrong email_cc')
        self.assertEqual(
            set([partner.id for partner in mail.recipient_ids]),
            set((self.partner_admin_id, self.user_raoul.partner_id.id, self.user_bert.partner_id.id)),
            'email_template: send_mail: wrong management of partner_to')

        # force send: take email_recipients into account
        email_template.send_mail(cr, uid, email_template_id, self.group_pigs_id, force_send=True, context=context)
        # NOTE(review): presumably populated by TestMail's patched outgoing
        # email hook -- confirm against the common test helpers.
        sent_emails = self._build_email_kwargs_list
        email_to_lst = [
            ['b@b.b', 'c@c.c'], ['Administrator <admin@yourcompany.example.com>'],
            ['Raoul Grosbedon <raoul@raoul.fr>'], ['Bert Tartignole <bert@bert.fr>']]
        self.assertEqual(len(sent_emails), 4, 'email_template: send_mail: 3 valid email recipients + email_to -> should send 4 emails')
        for email in sent_emails:
            self.assertIn(email['email_to'], email_to_lst, 'email_template: send_mail: wrong email_recipients')
|
beatrizjesus/my-first-blog | refs/heads/master | pasta/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py | 87 | import functools
from pip._vendor.requests.adapters import HTTPAdapter
from .controller import CacheController
from .cache import DictCache
from .filewrapper import CallbackFileWrapper
class CacheControlAdapter(HTTPAdapter):
    """A requests transport adapter that consults and updates an HTTP cache
    around each request, delegating Cache-Control logic to a
    CacheController."""

    # Methods whose successful response invalidates the cached entry
    # for the request URL (see build_response).
    invalidating_methods = set(['PUT', 'DELETE'])

    def __init__(self, cache=None,
                 cache_etags=True,
                 controller_class=None,
                 serializer=None,
                 heuristic=None,
                 *args, **kw):
        # cache: storage backend; defaults to an in-memory DictCache.
        # cache_etags: whether the controller stores/uses ETag validators.
        # controller_class: alternate CacheController implementation.
        # serializer: alternate serializer for cached responses.
        # heuristic: optional freshness heuristic applied before caching.
        super(CacheControlAdapter, self).__init__(*args, **kw)
        self.cache = cache or DictCache()
        self.heuristic = heuristic

        controller_factory = controller_class or CacheController
        self.controller = controller_factory(
            self.cache,
            cache_etags=cache_etags,
            serializer=serializer,
        )

    def send(self, request, **kw):
        """
        Send a request. Use the request information to see if it
        exists in the cache and cache the response if we need to and can.
        """
        if request.method == 'GET':
            # Serve straight from the cache when the controller holds a
            # usable entry; only GETs are ever cached here.
            cached_response = self.controller.cached_request(request)
            if cached_response:
                return self.build_response(request, cached_response, from_cache=True)

            # check for etags and add headers if appropriate
            request.headers.update(self.controller.conditional_headers(request))

        resp = super(CacheControlAdapter, self).send(request, **kw)

        return resp

    def build_response(self, request, response, from_cache=False):
        """
        Build a response by making a request or using the cache.

        This will end up calling send and returning a potentially
        cached response
        """
        if not from_cache and request.method == 'GET':
            # apply any expiration heuristics
            if response.status == 304:
                # We must have sent an ETag request. This could mean
                # that we've been expired already or that we simply
                # have an etag. In either case, we want to try and
                # update the cache if that is the case.
                cached_response = self.controller.update_cached_response(
                    request, response
                )

                if cached_response is not response:
                    from_cache = True

                # We are done with the server response, read a
                # possible response body (compliant servers will
                # not return one, but we cannot be 100% sure) and
                # release the connection back to the pool.
                response.read(decode_content=False)
                response.release_conn()

                response = cached_response
            else:
                # Check for any heuristics that might update headers
                # before trying to cache.
                if self.heuristic:
                    response = self.heuristic.apply(response)

                # Wrap the response file with a wrapper that will cache the
                # response when the stream has been consumed.
                response._fp = CallbackFileWrapper(
                    response._fp,
                    functools.partial(
                        self.controller.cache_response,
                        request,
                        response,
                    )
                )

        resp = super(CacheControlAdapter, self).build_response(
            request, response
        )

        # See if we should invalidate the cache.
        if request.method in self.invalidating_methods and resp.ok:
            cache_url = self.controller.cache_url(request.url)
            self.cache.delete(cache_url)

        # Give the request a from_cache attr to let people use it
        resp.from_cache = from_cache

        return resp

    def close(self):
        """Release the cache backend's resources, then the adapter's pools."""
        self.cache.close()
        super(CacheControlAdapter, self).close()
|
jesonyang001/qarepo | refs/heads/master | askbot/management/commands/askbot_build_solr_schema.py | 4 | from optparse import make_option
import sys
from django.utils.translation import activate as activate_language
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand
from django.template import loader, Context
from haystack.backends.solr_backend import SolrSearchBackend
from haystack.constants import ID, DJANGO_CT, DJANGO_ID, DEFAULT_OPERATOR, DEFAULT_ALIAS
SUPPORTED_LANGUAGES = ['en', 'es', 'ru', 'cn', \
'zn', 'fr', 'it', 'jp', 'ko', 'de']
class Command(BaseCommand):
    """Management command that renders a Solr ``schema.xml`` reflecting the
    configured haystack search indexes, localized for a chosen language."""
    help = "Generates a Solr schema that reflects the indexes."
    base_options = (
        make_option("-f", "--filename", action="store", type="string", dest="filename",
                    help='If provided, directs output to a file instead of stdout.'),
        make_option("-u", "--using", action="store", type="string", dest="using", default=DEFAULT_ALIAS,
                    help='If provided, chooses a connection to work with.'),
        # fixed help typo: "Language to user" -> "Language to use"
        make_option("-l", "--language", action="store", type="string", dest="language", default='en',
                    help='Language to use, in language code format')
    )
    option_list = BaseCommand.option_list + base_options

    def handle(self, *args, **options):
        """Generates a Solr schema that reflects the indexes."""
        using = options.get('using')
        language = options.get('language')[:2]
        activate_language(language)
        if language not in SUPPORTED_LANGUAGES:
            # Warn but continue: the template falls back to English analysis.
            sys.stderr.write("\n\n")
            sys.stderr.write("WARNING: your language: '%s' is not supported in our " % language)
            sys.stderr.write("template it will default to English more information in http://wiki.apache.org/solr/LanguageAnalysis")
            sys.stderr.write("\n\n")
        schema_xml = self.build_template(using=using, language=language)
        if options.get('filename'):
            self.write_file(options.get('filename'), schema_xml)
        else:
            self.print_stdout(schema_xml)

    def build_context(self, using, language='en'):
        """Build the template context from the haystack backend's schema.

        Raises ImproperlyConfigured when the selected connection is not a
        Solr backend.
        """
        from haystack import connections, connection_router

        backend = connections[using].get_backend()

        if not isinstance(backend, SolrSearchBackend):
            raise ImproperlyConfigured("'%s' isn't configured as a SolrEngine)." % backend.connection_alias)

        content_field_name, fields = backend.build_schema(connections[using].get_unified_index().all_searchfields())
        return Context({
            'content_field_name': content_field_name,
            'fields': fields,
            'default_operator': DEFAULT_OPERATOR,
            'ID': ID,
            'DJANGO_CT': DJANGO_CT,
            'DJANGO_ID': DJANGO_ID,
            'language': language,
        })

    def build_template(self, using, language='en'):
        """Render the schema template for the given connection and language."""
        t = loader.get_template('search_configuration/askbotsolr.xml')
        c = self.build_context(using=using, language=language)
        return t.render(c)

    def print_stdout(self, schema_xml):
        """Write usage instructions to stderr and the schema itself to stdout,
        so the schema can be cleanly redirected to a file."""
        sys.stderr.write("\n")
        sys.stderr.write("\n")
        sys.stderr.write("\n")
        sys.stderr.write("Save the following output to 'schema.xml' and place it in your Solr configuration directory.\n")
        sys.stderr.write("--------------------------------------------------------------------------------------------\n")
        sys.stderr.write("\n")
        # Previously a Python 2 `print` statement; an explicit write keeps the
        # stdout/stderr split consistent and is Python 3 compatible. `print`
        # appended a trailing newline, so one is added here too.
        sys.stdout.write(schema_xml + "\n")

    def write_file(self, filename, schema_xml):
        """Write the rendered schema to *filename*.

        Uses a context manager so the handle is closed even if write() raises
        (previously the file object leaked on error).
        """
        with open(filename, 'w') as schema_file:
            schema_file.write(schema_xml)
|
2014c2g12/c2g12 | refs/heads/master | c2wp/exts/wsgi/static/Brython2.1.0-20140419-113919/Lib/test/__init__.py | 2547 | # Dummy file to make this directory a package.
|
adammenges/statsmodels | refs/heads/master | statsmodels/iolib/stata_summary_examples.py | 39 |
""". regress totemp gnpdefl gnp unemp armed pop year
Source | SS df MS Number of obs = 16
-------------+------------------------------ F( 6, 9) = 330.29
Model | 184172402 6 30695400.3 Prob > F = 0.0000
Residual | 836424.129 9 92936.0144 R-squared = 0.9955
-------------+------------------------------ Adj R-squared = 0.9925
Total | 185008826 15 12333921.7 Root MSE = 304.85
------------------------------------------------------------------------------
totemp | Coef. Std. Err. t P>|t| [95% Conf. Interval]
-------------+----------------------------------------------------------------
gnpdefl | 15.06167 84.91486 0.18 0.863 -177.0291 207.1524
gnp | -.0358191 .033491 -1.07 0.313 -.111581 .0399428
unemp | -2.020229 .4883995 -4.14 0.003 -3.125065 -.9153928
armed | -1.033227 .2142741 -4.82 0.001 -1.517948 -.5485049
pop | -.0511045 .2260731 -0.23 0.826 -.5625173 .4603083
year | 1829.151 455.4785 4.02 0.003 798.7873 2859.515
_cons | -3482258 890420.3 -3.91 0.004 -5496529 -1467987
------------------------------------------------------------------------------
"""
#From Stata using Longley dataset as in the test and example for GLM
"""
. glm totemp gnpdefl gnp unemp armed pop year
Iteration 0: log likelihood = -109.61744
Generalized linear models No. of obs = 16
Optimization : ML Residual df = 9
Scale parameter = 92936.01
Deviance = 836424.1293 (1/df) Deviance = 92936.01
Pearson = 836424.1293 (1/df) Pearson = 92936.01
Variance function: V(u) = 1 [Gaussian]
Link function : g(u) = u [Identity]
AIC = 14.57718
Log likelihood = -109.6174355 BIC = 836399.2
------------------------------------------------------------------------------
| OIM
totemp | Coef. Std. Err. z P>|z| [95% Conf. Interval]
-------------+----------------------------------------------------------------
gnpdefl | 15.06167 84.91486 0.18 0.859 -151.3684 181.4917
gnp | -.0358191 .033491 -1.07 0.285 -.1014603 .029822
unemp | -2.020229 .4883995 -4.14 0.000 -2.977475 -1.062984
armed | -1.033227 .2142741 -4.82 0.000 -1.453196 -.6132571
pop | -.0511045 .2260731 -0.23 0.821 -.4941996 .3919906
year | 1829.151 455.4785 4.02 0.000 936.4298 2721.873
_cons | -3482258 890420.3 -3.91 0.000 -5227450 -1737066
------------------------------------------------------------------------------
"""
#RLM Example
"""
. rreg stackloss airflow watertemp acidconc
Huber iteration 1: maximum difference in weights = .48402478
Huber iteration 2: maximum difference in weights = .07083248
Huber iteration 3: maximum difference in weights = .03630349
Biweight iteration 4: maximum difference in weights = .2114744
Biweight iteration 5: maximum difference in weights = .04709559
Biweight iteration 6: maximum difference in weights = .01648123
Biweight iteration 7: maximum difference in weights = .01050023
Biweight iteration 8: maximum difference in weights = .0027233
Robust regression Number of obs = 21
F( 3, 17) = 74.15
Prob > F = 0.0000
------------------------------------------------------------------------------
stackloss | Coef. Std. Err. t P>|t| [95% Conf. Interval]
-------------+----------------------------------------------------------------
airflow | .8526511 .1223835 6.97 0.000 .5944446 1.110858
watertemp | .8733594 .3339811 2.61 0.018 .1687209 1.577998
acidconc | -.1224349 .1418364 -0.86 0.400 -.4216836 .1768139
_cons | -41.6703 10.79559 -3.86 0.001 -64.447 -18.89361
------------------------------------------------------------------------------
""" |
hanlind/nova | refs/heads/master | nova/conf/ephemeral_storage.py | 5 | # Copyright 2015 Huawei Technology corp.
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
# Option group under which the ephemeral storage encryption options below
# are registered ([ephemeral_storage_encryption] section in nova.conf).
ephemeral_storage_encryption_group = cfg.OptGroup(
    name='ephemeral_storage_encryption',
    title='Ephemeral storage encryption options')

# Options controlling whether and how LVM-backed ephemeral disks are
# encrypted (feature toggle, cipher-mode string, key length).
ephemeral_storage_encryption_opts = [
    cfg.BoolOpt('enabled',
                default=False,
                help="""
Enables/disables LVM ephemeral storage encryption.
"""),
    cfg.StrOpt('cipher',
               default='aes-xts-plain64',
               help="""
Cipher-mode string to be used.
The cipher and mode to be used to encrypt ephemeral storage. The set of
cipher-mode combinations available depends on kernel support.
Possible values:
* Any crypto option listed in ``/proc/crypto``.
"""),
    cfg.IntOpt('key_size',
               default=512,
               min=1,
               help="""
Encryption key length in bits.
The bit length of the encryption key to be used to encrypt ephemeral storage.
In XTS mode only half of the bits are used for encryption key.
"""),
]
def register_opts(conf):
    """Register the ephemeral storage encryption option group and its
    options on the given oslo.config ConfigOpts instance."""
    group = ephemeral_storage_encryption_group
    conf.register_group(group)
    conf.register_opts(ephemeral_storage_encryption_opts, group=group)
def list_opts():
    """Return a mapping of option group to its options, in the shape
    consumed by oslo.config's sample-config generator."""
    return {
        ephemeral_storage_encryption_group: ephemeral_storage_encryption_opts,
    }
|
vxvinh1511/djangoproject.com | refs/heads/master | dashboard/models.py | 6 | import ast
import calendar
import datetime
import xmlrpc.client
import feedparser
import requests
from django.conf import settings
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import connections, models
from django_hosts.resolvers import reverse
# Measurement periods for metrics: 'instant' metrics report an absolute value
# at sampling time; 'daily'/'weekly' metrics reset each period and count up
# (see Metric.gather_data / Metric._gather_data_periodic below).
METRIC_PERIOD_INSTANT = 'instant'
METRIC_PERIOD_DAILY = 'daily'
METRIC_PERIOD_WEEKLY = 'weekly'
METRIC_PERIOD_CHOICES = (
    (METRIC_PERIOD_INSTANT, 'Instant'),
    (METRIC_PERIOD_DAILY, 'Daily'),
    (METRIC_PERIOD_WEEKLY, 'Weekly'),
)
class Category(models.Model):
    """A grouping for dashboard metrics, ordered by ``position``."""
    name = models.CharField(max_length=300)
    position = models.PositiveSmallIntegerField(default=1)

    class Meta:
        verbose_name_plural = 'categories'

    def __str__(self):
        return self.name
class Metric(models.Model):
    """Abstract base for all dashboard metrics.

    Concrete subclasses supply ``fetch()`` (read the current value from the
    external source) and ``link()`` (URL of the underlying resource), and
    collect their measurements as ``Datum`` rows via the ``data`` relation.
    """
    name = models.CharField(max_length=300)
    slug = models.SlugField()
    category = models.ForeignKey(Category, blank=True, null=True,
                                 on_delete=models.SET_NULL)
    position = models.PositiveSmallIntegerField(default=1)
    data = GenericRelation('Datum')
    show_on_dashboard = models.BooleanField(default=True)
    show_sparkline = models.BooleanField(default=True)
    period = models.CharField(max_length=15, choices=METRIC_PERIOD_CHOICES,
                              default=METRIC_PERIOD_INSTANT)
    unit = models.CharField(max_length=100)
    unit_plural = models.CharField(max_length=100)

    class Meta:
        abstract = True

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        return reverse("metric-detail", args=[self.slug], host='dashboard')

    @property
    def display_position(self):
        # Sort key for display: uncategorized metrics (-1) sort first, then
        # by category position, then by the metric's own position.
        cat_position = -1 if self.category is None else self.category.position
        return cat_position, self.position

    def gather_data(self, since):
        """
        Gather all the data from this metric since a given date.

        Returns a list of (timestamp, value) tuples. The timestamp is a Unix
        timestamp, converted from localtime to UTC.

        Raises ValueError for an unrecognized ``period`` value.
        """
        if self.period == METRIC_PERIOD_INSTANT:
            return self._gather_data_instant(since)
        elif self.period == METRIC_PERIOD_DAILY:
            return self._gather_data_periodic(since, 'day')
        elif self.period == METRIC_PERIOD_WEEKLY:
            return self._gather_data_periodic(since, 'week')
        else:
            # Bug fix: the format argument was previously passed as a second
            # positional argument to ValueError instead of being %-formatted,
            # yielding the tuple-looking message "('Unknown period: %s', ...)".
            raise ValueError("Unknown period: %s" % self.period)

    def _gather_data_instant(self, since):
        """
        Gather data from an "instant" metric.

        Instant metrics change every time we measure them, so they're easy:
        just return every single measurement.
        """
        data = (self.data.filter(timestamp__gt=since)
                         .order_by('timestamp')
                         .values_list('timestamp', 'measurement'))
        return [(calendar.timegm(t.timetuple()), m) for (t, m) in data]

    def _gather_data_periodic(self, since, period):
        """
        Gather data from "periodic" metrics.

        Periodic metrics are reset every day/week/month and count up as the
        period goes on. Think "commits today" or "new tickets this week".

        XXX I'm not completely sure how to deal with this since time zones
        wreak havoc, so there's right now a hard-coded offset which doesn't
        really scale but works for now.
        """
        OFFSET = "2 hours"  # HACK!

        ctid = ContentType.objects.get_for_model(self).id
        c = connections['default'].cursor()
        # Parameterized query: period/offset/ids are bound, not interpolated.
        c.execute('''SELECT
            DATE_TRUNC(%s, timestamp - INTERVAL %s),
            MAX(measurement)
        FROM dashboard_datum
        WHERE content_type_id = %s
        AND object_id = %s
        AND timestamp >= %s
        GROUP BY 1;''', [period, OFFSET, ctid, self.id, since])
        # NOTE(review): values are cast to float here but returned raw by
        # _gather_data_instant -- confirm whether consumers rely on that.
        return [(calendar.timegm(t.timetuple()), float(m)) for (t, m) in c.fetchall()]
class TracTicketMetric(Metric):
    """Metric counting the Trac tickets matching a stored query string."""
    query = models.TextField()

    def fetch(self):
        """Return the number of tickets matching ``self.query``, via Trac's
        XML-RPC interface (``max=0`` disables the result-size cap)."""
        proxy = xmlrpc.client.ServerProxy(settings.TRAC_RPC_URL)
        tickets = proxy.ticket.query(self.query + "&max=0")
        return len(tickets)

    def link(self):
        """URL of the matching Trac query page, most recently changed first."""
        return "{0}query?{1}&desc=1&order=changetime".format(settings.TRAC_URL, self.query)
class RSSFeedMetric(Metric):
    """Metric counting the entries currently present in an RSS/Atom feed."""
    feed_url = models.URLField(max_length=1000)
    link_url = models.URLField(max_length=1000)

    def fetch(self):
        # Download the feed body with requests, then parse it with feedparser
        # and count its entries.
        return len(feedparser.parse(requests.get(self.feed_url).text).entries)

    def link(self):
        return self.link_url
class GithubItemCountMetric(Metric):
    """Count the items behind a paginated GitHub API endpoint.

    Example: https://api.github.com/repos/django/django/pulls?state=open
    """
    api_url = models.URLField(max_length=1000)
    link_url = models.URLField(max_length=1000)

    def fetch(self):
        """
        Request the specified GitHub API URL with 100 items per page. Loop over
        the pages until no page left. Return total item count.
        """
        total = 0
        page = 1
        while True:
            response = requests.get(self.api_url, params={
                'page': page,
                'per_page': 100,
            })
            items_on_page = len(response.json())
            total += items_on_page
            if items_on_page < 100:
                # A short page means there is no further page to fetch.
                return total
            page += 1

    def link(self):
        return self.link_url
class JenkinsFailuresMetric(Metric):
    """
    Track failures of a job/build. Uses the Python flavor of the Jenkins REST
    API.
    """
    jenkins_root_url = models.URLField(
        verbose_name='Jenkins instance root URL',
        max_length=1000,
        help_text='E.g. http://ci.djangoproject.com/',
    )
    build_name = models.CharField(
        max_length=100,
        help_text='E.g. Django Python3',
    )
    is_success_cnt = models.BooleanField(
        default=False,
        verbose_name='Should the metric be a value representing success ratio?',
        help_text='E.g. if there are 50 tests of which 30 are failing the value of this metric '
                  'will be 20 (or 40%.)',
    )
    is_percentage = models.BooleanField(
        default=False,
        verbose_name='Should the metric be a percentage value?',
        help_text='E.g. if there are 50 tests of which 30 are failing the value of this metric '
                  'will be 60%.',
    )

    def urljoin(self, *parts):
        # Join URL fragments with single slashes, regardless of whether the
        # inputs carry leading/trailing slashes themselves.
        return '/'.join(p.strip('/') for p in parts)

    def _fetch(self):
        """
        Actually get the values we are interested in by using the Jenkins REST
        API (https://wiki.jenkins-ci.org/display/JENKINS/Remote+access+API)

        Returns a (failures, total) tuple for the last completed build.
        """
        api_url = self.urljoin(self.link(), 'api/python')
        job_desc = requests.get(api_url)
        # The "python flavor" endpoint returns a Python literal; parsed with
        # ast.literal_eval, which evaluates literals only (no code execution).
        job_dict = ast.literal_eval(job_desc.text)
        build_ptr_dict = job_dict['lastCompletedBuild']
        build_url = self.urljoin(build_ptr_dict['url'], 'api/python')
        build_desc = requests.get(build_url)
        build_dict = ast.literal_eval(build_desc.text)
        # NOTE(review): the hard-coded actions[4] index assumes the test
        # report is the fifth action on the build -- fragile; confirm against
        # the target Jenkins instance's payload.
        return (build_dict['actions'][4]['failCount'], build_dict['actions'][4]['totalCount'])

    def _calculate(self, failures, total):
        """Calculate the metric value."""
        if self.is_success_cnt:
            value = total - failures
        else:
            value = failures
        if self.is_percentage:
            if not total:
                # Avoid division by zero when the build reports no tests.
                return 0
            value = (value * 100) / total
        return value

    def fetch(self):
        failures, total = self._fetch()
        return self._calculate(failures, total)

    def link(self):
        return self.urljoin(self.jenkins_root_url, 'job', self.build_name)
class Datum(models.Model):
    """A single timestamped measurement for some metric, attached via a
    generic foreign key (content_type + object_id)."""
    metric = GenericForeignKey()
    content_type = models.ForeignKey(ContentType, related_name='+')
    object_id = models.PositiveIntegerField()
    timestamp = models.DateTimeField(default=datetime.datetime.now)
    measurement = models.BigIntegerField()

    class Meta:
        ordering = ['-timestamp']
        get_latest_by = 'timestamp'
        verbose_name_plural = 'data'

    def __str__(self):
        return "%s at %s: %s" % (self.metric, self.timestamp, self.measurement)
|
gfneto/bitcoin-abe | refs/heads/master | build/lib.linux-x86_64-2.7/Abe/Chain/Namecoin.py | 28 | # Copyright(C) 2014 by Abe developers.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/agpl.html>.
from .Sha256NmcAuxPowChain import Sha256NmcAuxPowChain
from . import SCRIPT_TYPE_UNKNOWN
from ..deserialize import opcodes
class Namecoin(Sha256NmcAuxPowChain):
    """
    Namecoin represents name operations in transaction output scripts.
    """
    def __init__(chain, **kwargs):
        # NOTE: this codebase names the instance parameter ``chain``
        # instead of ``self``.
        chain.name = 'Namecoin'
        chain.code3 = 'NMC'
        # Base58 address version byte for Namecoin addresses.
        chain.address_version = '\x34'
        # Network magic bytes identifying Namecoin P2P messages.
        chain.magic = '\xf9\xbe\xb4\xfe'
        Sha256NmcAuxPowChain.__init__(chain, **kwargs)
    # Opcodes that may terminate a name-operation prefix. The tuple index
    # doubles as the number of pushed items the opcode consumes:
    # OP_NOP drops 0, OP_DROP drops 1, OP_2DROP drops 2.
    _drops = (opcodes.OP_NOP, opcodes.OP_DROP, opcodes.OP_2DROP)
    def parse_decoded_txout_script(chain, decoded):
        """Parse a decoded txout script, skipping a leading name-op prefix.

        ``decoded`` is a sequence of (opcode, data) pairs where ``data``
        is None for non-push opcodes.
        """
        start = 0
        pushed = 0
        # Tolerate (but ignore for now) name operations.
        for i in xrange(len(decoded)):
            opcode = decoded[i][0]
            if decoded[i][1] is not None or \
               opcode == opcodes.OP_0 or \
               opcode == opcodes.OP_1NEGATE or \
               (opcode >= opcodes.OP_1 and opcode <= opcodes.OP_16):
                # Any form of data push counts as one stack item.
                pushed += 1
            elif opcode in chain._drops:
                to_drop = chain._drops.index(opcode)
                if pushed < to_drop:
                    # Malformed prefix: not enough pushed items to drop.
                    break
                pushed -= to_drop
                # Everything up to here was name-op prefix; the real
                # script starts after it.
                start = i + 1
            else:
                return Sha256NmcAuxPowChain.parse_decoded_txout_script(chain, decoded[start:])
        return SCRIPT_TYPE_UNKNOWN, decoded
    # Defaults for locating/configuring the coin daemon.
    datadir_conf_file_name = "namecoin.conf"
    datadir_rpcport = 8336
|
patgmiller/django-guardian | refs/heads/devel | guardian/templatetags/guardian_tags.py | 14 | """
``django-guardian`` template tags. To use in a template just put the following
*load* tag inside a template::
{% load guardian_tags %}
"""
from __future__ import unicode_literals
from django import template
from django.contrib.auth.models import Group, AnonymousUser
try:
# Django < 1.8
from django.template import get_library
from django.template import InvalidTemplateLibrary
except ImportError:
# Django >= 1.8
from django.template.base import get_library
from django.template.base import InvalidTemplateLibrary
from django.template.defaulttags import LoadNode
from guardian.compat import get_user_model
from guardian.exceptions import NotUserNorGroup
from guardian.core import ObjectPermissionChecker
# Tag library instance the template tags below register themselves against.
register = template.Library()
@register.tag
def friendly_load(parser, token):
    """
    Load template tag libraries, silently skipping any that do not exist.

    Useful together with ``if_has_tag``: you can attempt to load an
    optional library (e.g. the comments framework) and degrade
    gracefully when the backing app is not installed::

        {% load friendly_loader %}
        {% friendly_load comments webdesign %}
        {% if_has_tag render_comment_list %}
            {% render_comment_list for obj %}
        {% endif_has_tag %}
    """
    requested = token.contents.split()[1:]
    for library_name in requested:
        try:
            parser.add_library(get_library(library_name))
        except InvalidTemplateLibrary:
            # Library is not available -- ignore it.
            continue
    return LoadNode()
class ObjectPermissionsNode(template.Node):
    """Template node storing object permissions under a context variable."""

    def __init__(self, for_whom, obj, context_var):
        self.for_whom = template.Variable(for_whom)
        self.obj = template.Variable(obj)
        self.context_var = context_var

    def render(self, context):
        """Resolve user/group and object, then expose the permission list."""
        for_whom = self.for_whom.resolve(context)
        user_model = get_user_model()
        if isinstance(for_whom, user_model):
            self.user, self.group = for_whom, None
        elif isinstance(for_whom, AnonymousUser):
            # Guardian maps the anonymous visitor onto a real user row.
            self.user, self.group = user_model.get_anonymous(), None
        elif isinstance(for_whom, Group):
            self.user, self.group = None, for_whom
        else:
            raise NotUserNorGroup("User or Group instance required (got %s)"
                % for_whom.__class__)
        obj = self.obj.resolve(context)
        if not obj:
            # Nothing to check permissions against -- expose nothing.
            return ''
        checker = ObjectPermissionChecker(for_whom)
        context[self.context_var] = checker.get_perms(obj)
        return ''
@register.tag
def get_obj_perms(parser, token):
    """
    Store the list of permission codenames a ``user``/``group`` has for
    ``obj`` (a model instance) in a template context variable.

    Expected format::

        {% get_obj_perms user/group for obj as "context_var" %}

    Example (``flatpage`` available from context)::

        {% get_obj_perms request.user for flatpage as "flatpage_perms" %}
        {% if "delete_flatpage" in flatpage_perms %}
            <a href="/pages/delete?target={{ flatpage.url }}">Remove page</a>
        {% endif %}

    .. note::
        Set and use the permissions within the same template block and
        remember that superusers always get the full permission list.

    .. versionadded:: 1.2
        Passing ``None`` as ``obj`` returns an empty permission set
        instead of raising an obfuscated exception.
    """
    bits = token.split_contents()
    usage = '{% get_obj_perms user/group for obj as "context_var" %}'
    if len(bits) != 6 or bits[2] != 'for' or bits[4] != 'as':
        raise template.TemplateSyntaxError("get_obj_perms tag should be in "
            "format: %s" % usage)
    for_whom, obj, context_var = bits[1], bits[3], bits[5]
    quoted = context_var[0] == context_var[-1] and context_var[0] in ('"', "'")
    if not quoted:
        raise template.TemplateSyntaxError("get_obj_perms tag's context_var "
            "argument should be in quotes")
    return ObjectPermissionsNode(for_whom, obj, context_var[1:-1])
|
EUDAT-B2SHARE/b2share | refs/heads/master | b2share/modules/upgrade/api.py | 1 | # -*- coding: utf-8 -*-
#
# This file is part of EUDAT B2Share.
# Copyright (C) 2017 CERN.
#
# B2Share is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# B2Share is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with B2Share; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""B2Share upgrade api."""
import re
import traceback
import warnings
from collections import namedtuple
from queue import Queue
from functools import wraps
import click
from flask import current_app
from invenio_db import db
from sqlalchemy.orm.attributes import flag_modified
from b2share.version import __version__
from .errors import MigrationFromUnknownVersionError
from .models import Migration
from b2share.utils import get_base_url
def with_request_context(f):
    """Decorator running the wrapped callable inside a request context.

    A Flask test request context rooted at the configured base URL is
    pushed around the call so code relying on request-bound state or URL
    building works outside an actual HTTP request.

    :param f: callable to wrap.
    :returns: the wrapped callable.
    """
    @wraps(f)
    def decorator(*args, **kwargs):
        with current_app.test_request_context('/', base_url=get_base_url()):
            # Bug fix: propagate the wrapped function's return value,
            # which the original decorator silently discarded.
            return f(*args, **kwargs)
    return decorator
def upgrade_to_last_version(verbose):
    """Upgrade the database to the last version and reindex the records.

    :param verbose: forwarded to each recipe's ``run`` to control
        per-step output.
    """
    # NOTE(review): ``alembic`` is assigned but unused below -- possibly
    # kept for its extension-initialization side effect; confirm.
    alembic = current_app.extensions['invenio-db'].alembic
    # Remove ".dev*", "rc*", ... from the version to simplify the upgrade
    last_version = re.match(r'^\d+\.\d+\.\d+', __version__).group(0)
    last_failure = None
    # detect data current version. Special case for version 2.0.0 as there was
    # no alembic recipe at that time.
    if db.engine.dialect.has_table(db.engine, 'transaction'):
        if not db.engine.dialect.has_table(db.engine, 'alembic_version'):
            # Tables exist but alembic never ran -> pre-alembic B2SHARE.
            current_version = '2.0.0'
        else:
            # Migrations ordered newest first.
            all_migrations = Migration.query.order_by(
                Migration.updated.desc()).all()
            last_migration = all_migrations[0]
            if last_migration.success:
                if last_migration.version == last_version:
                    click.secho('Already up to date.')
                    return
            else:
                # Remember the failed attempt so its completed steps can
                # be skipped when the migration is re-run.
                last_failure = last_migration
            try:
                # Resume from the most recent successful migration.
                last_success = next(mig for mig in all_migrations
                                    if mig.success)
                current_version = last_success.version
            except StopIteration:
                current_version = '2.0.0'
    else:
        # Empty database: initialization rather than migration.
        current_version = 'init'
    upgrades = UpgradeRecipe.build_upgrade_path(current_version,
                                                last_version)
    for upgrade in upgrades:
        upgrade.run(failed_migration=last_failure, verbose=verbose)
# A single replayable upgrade unit: ``condition(alembic, failed_migration)``
# decides whether ``run(alembic, verbose)`` executes.
Step = namedtuple('Step', ['condition', 'run'])
def default_step_condition_factory(target_version, step_name):
    """Build the default "should this step run?" predicate for a step.

    The returned condition skips the step when a previous attempt at the
    same migration already completed it (status ``success`` or
    ``skip``); in every other case the step runs.
    """
    _done = ('success', 'skip')

    def default_step_condition(alembic, failed_migration, *args):
        if not failed_migration or failed_migration.version != target_version:
            # No previous failed attempt for this version: always run.
            return True
        for logged in failed_migration.data['steps']:
            if logged['name'] == step_name:
                # Re-run only steps that did not finish cleanly.
                return logged['status'] not in _done
        # The step never ran at all during the failed attempt.
        return True
    return default_step_condition
class UpgradeRecipe(object):
    """A B2SHARE upgrade which migrates from one B2SHARE version to another.

    An upgrade is composed of Steps which are run sequentially.
    Every Step is a function which is replayable if it fails but cannot
    be rolled back once it succeeds.
    A failed upgrade can be rerun.
    """

    # dict of all upgrades. src_version -> dst_version -> upgrade
    upgrades = dict()

    # Flag signaling if the upgrade recipes have been loaded yet.
    loaded = False

    def __init__(self, src_version, dst_version):
        """Constructor.

        Args:
            src_version: origin version required to run this upgrade.
            dst_version: new B2SHARE version once this upgrade has ran.
        """
        self.src_version = src_version
        self.dst_version = dst_version
        # upgrade steps. Each step is a unit which is replayable if it
        # failed.
        self.steps = []
        self._step_names = set()
        # register the new upgrade; only one recipe may exist per
        # (src_version, dst_version) pair.
        src_upgrades = self.upgrades.setdefault(src_version, dict())
        assert dst_version not in src_upgrades
        src_upgrades[dst_version] = self

    def step(self, condition=None):
        """Function decorator registering a step of the upgrade.

        Args:
            condition: a callable which, if it returns False, makes
                upgrade.run skip the step.
        """
        def decorator(step_func):
            # docstring is mandatory: it is echoed when the step runs
            assert step_func.__doc__ is not None
            # check for duplicate step names
            assert step_func.__name__ not in self._step_names
            final_condition = condition
            # Use the default condition if none is provided
            if condition is None:
                final_condition = default_step_condition_factory(
                    self.dst_version, step_func.__name__
                )
            self.steps.append(
                Step(condition=final_condition, run=step_func)
            )
            self._step_names.add(step_func.__name__)
            return step_func
        return decorator

    def remove_step(self, step_func):
        """Remove a step from the list of upgrade steps."""
        # Bug fix: on Python 3 ``filter`` returns a lazy iterator; assigning
        # it to ``self.steps`` would break (or silently empty) any later
        # iteration. Materialize the filtered list instead.
        self.steps = [step for step in self.steps if step.run != step_func]
        self._step_names.remove(step_func.__name__)

    @classmethod
    def load(cls):
        """Load all upgrade recipes."""
        import pkgutil
        import b2share.modules.upgrade.upgrades as upgrades
        # Import every module in the upgrades package so recipes register
        # themselves via UpgradeRecipe.__init__.
        for importer, modname, ispkg in pkgutil.walk_packages(
                path=upgrades.__path__,
                prefix=upgrades.__name__ + '.',
                onerror=lambda x: None):
            __import__(modname)
        cls.loaded = True

    @with_request_context
    def run(self, failed_migration=None, verbose=None):
        """Run the upgrade.

        Args:
            failed_migration: previous failed ``Migration`` for the same
                target version, if any. Passed to each step's condition
                so already-successful steps can be skipped.
            verbose: when truthy, echo each step's docstring as it runs.
        """
        if not self.loaded:
            self.load()
        alembic = current_app.extensions['invenio-db'].alembic
        migration = Migration(
            version=self.dst_version,
            data=dict(steps=[], error=None, status='start')
        )
        # save the migration state (only possible once the migrations
        # table itself exists)
        if db.engine.dialect.has_table(db.engine, 'b2share_migrations'):
            db.session.add(migration)
            db.session.commit()
        for step in self.steps:
            step_log = dict(
                name=step.run.__name__,
                status='start'
            )
            migration.data['steps'].append(step_log)
            try:
                # Make sure alembic does not hold a stale connection.
                alembic.migration_context.bind.close()
                if step.condition is None or step.condition(alembic,
                                                            failed_migration):
                    if verbose:
                        click.secho(step.run.__doc__, fg='green')
                    step.run(alembic, verbose)
                    step_log['status'] = 'success'
                else:
                    step_log['status'] = 'skip'
            except BaseException as e:
                db.session.rollback()
                # Bug fix: update the existing log entry for this step
                # instead of appending a duplicate one.
                step_log['status'] = 'error'
                migration.data['error'] = traceback.format_exc()
                migration.data['status'] = 'error'
                if not db.engine.dialect.has_table(db.engine,
                                                   'b2share_migrations'):
                    # Cannot persist the failure -> at least print it.
                    click.secho(
                        'Failed to upgrade while running upgrade {0} -> {1}. '
                        'Step {2}.\nTraceback:\n{3}'.format(
                            self.src_version, self.dst_version,
                            step.run.__name__, traceback.format_exc())
                    )
                raise e
            finally:
                # save the migration state
                if db.engine.dialect.has_table(db.engine,
                                               'b2share_migrations'):
                    flag_modified(migration, 'data')
                    db.session.add(migration)
                    db.session.commit()
        # mark the migration as successful and save it
        migration.data['status'] = 'success'
        db.session.add(migration)
        flag_modified(migration, 'data')
        db.session.commit()

    @classmethod
    def build_upgrade_path(cls, src_version, dst_version):
        """Build the upgrade path from src_version to dst_version.

        The idea is that we might have migrations with different possible
        paths.
        Example:
            v1.0.0 -> v1.0.1 -> v1.0.2
              |          |         |
              ----> v1.1.0 <-------
        Thus we have to find the shortest path from the current version
        to the one we target.

        Returns:
            the list of upgrades to run to migrate from version
            src_version to dst_version, or None if no path exists.
        """
        if not cls.loaded:
            cls.load()
        if src_version == dst_version:
            return []
        # stop the migration if the current version is unknown.
        if src_version not in cls.upgrades:
            raise MigrationFromUnknownVersionError(src_version, dst_version)
        Branch = namedtuple('branch', ['version', 'upgrades'])
        # Breadth-first search -> first hit is a shortest path.
        queue = Queue()
        queue.put(Branch(src_version, []))
        while not queue.empty():
            branch = queue.get()
            for new_version in cls.upgrades[branch.version]:
                upgrades = branch.upgrades + \
                    [cls.upgrades[branch.version][new_version]]
                if new_version == dst_version:
                    return upgrades
                queue.put(Branch(new_version, upgrades))
def alembic_upgrade(target='heads'):
    """Upgrade the database using alembic.

    We use this instead of flask-alembic because this is currently the only
    way to use the same connection as our flask-sqlalchemy session. This
    enables us to run everything in the same transaction.
    This code is inspired from flask-alembic code.

    :param target: alembic revision(s) to upgrade to (default ``'heads'``).
    """
    alembic = current_app.extensions['invenio-db'].alembic
    env = alembic.environment_context

    def do_upgrade(revision, context):
        # Compute the revision steps needed to reach ``target``.
        return alembic.script_directory._upgrade_revs(target, revision)

    env.configure(
        connection=db.session.connection(), target_metadata=db.metadata,
        fn=do_upgrade, **current_app.config['ALEMBIC_CONTEXT']
    )
    with warnings.catch_warnings():
        # Ignore the warning coming from the invenio-db naming convention
        # recipe. Bug fix: raw string so that ``\w`` is a regex token, not
        # an (invalid) string escape raising DeprecationWarning on py3.6+.
        warnings.filterwarnings("ignore",
                                message=r'Update \w* CHECK \w* manually')
        env.run_migrations()
def alembic_stamp(target='heads'):
    """Stamp the database revision using alembic.

    Runs on the same connection as our flask-sqlalchemy session (which
    flask-alembic cannot do), so the stamp happens inside the same
    transaction as the rest of the upgrade. Inspired by flask-alembic.

    :param target: alembic revision(s) to stamp (default ``'heads'``).
    """
    alembic = current_app.extensions['invenio-db'].alembic
    env = alembic.environment_context

    def do_stamp(revision, context):
        # Record ``target`` as the current revision without migrating.
        return alembic.script_directory._stamp_revs(target, revision)

    env.configure(
        fn=do_stamp,
        connection=db.session.connection(),
        target_metadata=db.metadata,
        **current_app.config['ALEMBIC_CONTEXT']
    )
    env.run_migrations()
|
CG-F16-24-Rutgers/steersuite-rutgers | refs/heads/master | steerstats/tools/deap/gp.py | 8 | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
"""The :mod:`gp` module provides the methods and classes to perform
Genetic Programming with DEAP. It essentially contains the classes to
build a Genetic Program Tree, and the functions to evaluate it.
This module support both strongly and loosely typed GP.
"""
import copy
import random
import re
import sys
import warnings
from collections import defaultdict, deque
from functools import partial, wraps
from inspect import isclass
from operator import eq, lt
######################################
# GP Data structure #
######################################
# Define the name of type for any types.
# ``__type__`` is the wildcard type used by the untyped GP API
# (PrimitiveSet): every argument/return slot accepts it.
__type__ = object
class PrimitiveTree(list):
    """Tree specifically formatted for optimization of genetic
    programming operations. The tree is represented with a
    list where the nodes are appended in a depth-first order.
    The nodes appended to the tree are required to
    have an attribute *arity* which defines the arity of the
    primitive. An arity of 0 is expected from terminals nodes.
    """
    def __init__(self, content):
        list.__init__(self, content)
    def __deepcopy__(self, memo):
        # Copy the node list shallowly, then deep-copy any extra instance
        # attributes (e.g. a fitness attached by the evolutionary toolbox).
        new = self.__class__(self)
        new.__dict__.update(copy.deepcopy(self.__dict__, memo))
        return new
    def __setitem__(self, key, val):
        # Check for most common errors
        # Does NOT check for STGP constraints
        if isinstance(key, slice):
            if key.start >= len(self):
                raise IndexError("Invalid slice object (try to assign a %s"
                    " in a tree of size %d). Even if this is allowed by the"
                    " list object slice setter, this should not be done in"
                    " the PrimitiveTree context, as this may lead to an"
                    " unpredictable behavior for searchSubtree or evaluate."
                    % (key, len(self)))
            # A replacement subtree is complete iff arities balance out:
            # each node consumes one slot and opens ``arity`` new ones.
            total = val[0].arity
            for node in val[1:]:
                total += node.arity - 1
            if total != 0:
                raise ValueError("Invalid slice assignation : insertion of"
                    " an incomplete subtree is not allowed in PrimitiveTree."
                    " A tree is defined as incomplete when some nodes cannot"
                    " be mapped to any position in the tree, considering the"
                    " primitives' arity. For instance, the tree [sub, 4, 5,"
                    " 6] is incomplete if the arity of sub is 2, because it"
                    " would produce an orphan node (the 6).")
        elif val.arity != self[key].arity:
            raise ValueError("Invalid node replacement with a node of a"
                " different arity.")
        list.__setitem__(self, key, val)
    def __str__(self):
        """Return the expression in a human readable string.
        """
        string = ""
        stack = []
        # Iterative depth-first rendering: each stack entry is a node and
        # the formatted strings of its children seen so far.
        for node in self:
            stack.append((node, []))
            while len(stack[-1][1]) == stack[-1][0].arity:
                prim, args = stack.pop()
                string = prim.format(*args)
                if len(stack) == 0:
                    break  # If stack is empty, all nodes should have been seen
                stack[-1][1].append(string)
        return string
    @classmethod
    def from_string(cls, string, pset):
        """Try to convert a string expression into a PrimitiveTree given a
        PrimitiveSet *pset*. The primitive set needs to contain every primitive
        present in the expression.

        :param string: String representation of a Python expression.
        :param pset: Primitive set from which primitives are selected.
        :returns: PrimitiveTree populated with the deserialized primitives.
        """
        tokens = re.split("[ \t\n\r\f\v(),]", string)
        expr = []
        # Queue of the types still expected, in depth-first order.
        ret_types = deque()
        for token in tokens:
            if token == '':
                continue
            if len(ret_types) != 0:
                type_ = ret_types.popleft()
            else:
                type_ = None
            if token in pset.mapping:
                primitive = pset.mapping[token]
                if len(ret_types) != 0 and primitive.ret != type_:
                    raise TypeError("Primitive {} return type {} does not "
                        "match the expected one: {}."
                        .format(primitive, primitive.ret, type_))
                expr.append(primitive)
                if isinstance(primitive, Primitive):
                    # NOTE(review): extendleft reverses the argument order
                    # relative to ``primitive.args`` -- appears deliberate
                    # given the popleft consumption above; confirm against
                    # upstream DEAP.
                    ret_types.extendleft(primitive.args)
            else:
                # Not a known primitive: evaluate the token as a literal
                # terminal value.
                try:
                    token = eval(token)
                except NameError:
                    raise TypeError("Unable to evaluate terminal: {}.".format(token))
                if type_ is None:
                    type_ = type(token)
                if type(token) != type_:
                    raise TypeError("Terminal {} type {} does not "
                        "match the expected one: {}."
                        .format(token, type(token), type_))
                expr.append(Terminal(token, False, type_))
        return cls(expr)
    @property
    def height(self):
        """Return the height of the tree, or the depth of the
        deepest node.
        """
        stack = [0]
        max_depth = 0
        for elem in self:
            depth = stack.pop()
            max_depth = max(max_depth, depth)
            # Each child of ``elem`` sits one level deeper.
            stack.extend([depth+1] * elem.arity)
        return max_depth
    @property
    def root(self):
        """Root of the tree, the element 0 of the list.
        """
        return self[0]
    def searchSubtree(self, begin):
        """Return a slice object that corresponds to the
        range of values that defines the subtree which has the
        element with index *begin* as its root.
        """
        end = begin + 1
        # ``total`` counts arity slots still to fill; it reaches 0 exactly
        # at the end of the subtree rooted at ``begin``.
        total = self[begin].arity
        while total > 0:
            total += self[end].arity - 1
            end += 1
        return slice(begin, end)
class Primitive(object):
    """A named operator node with a fixed arity.

    Calling :meth:`format` with the string representations of the
    arguments produces the Python source invoking the primitive.

    >>> pr = Primitive("mul", (int, int), int)
    >>> pr.format(1, 2)
    'mul(1, 2)'
    """
    __slots__ = ('name', 'arity', 'args', 'ret', 'seq')

    def __init__(self, name, args, ret):
        self.name = name
        self.arity = len(args)
        self.args = args
        self.ret = ret
        # Pre-build the call template, e.g. "mul({0}, {1})".
        placeholders = ", ".join("{%d}" % i for i in range(self.arity))
        self.seq = "%s(%s)" % (self.name, placeholders)

    def format(self, *args):
        return self.seq.format(*args)
class Terminal(object):
    """A leaf node of the expression: a value or a 0-arity function.

    ``symbolic`` terminals render with ``str`` (bare names, e.g. ARG0);
    value terminals render with ``repr`` (valid Python literals).
    """
    __slots__ = ('name', 'value', 'ret', 'conv_fct')

    def __init__(self, terminal, symbolic, ret):
        self.ret = ret
        self.value = terminal
        self.name = str(terminal)
        if symbolic:
            self.conv_fct = str
        else:
            self.conv_fct = repr

    @property
    def arity(self):
        # Terminals never take arguments.
        return 0

    def format(self):
        return self.conv_fct(self.value)
class Ephemeral(Terminal):
    """Class that encapsulates a terminal which value is set when the
    object is created. To mutate the value, a new object has to be
    generated. This is an abstract base class. When subclassing, a
    staticmethod 'func' must be defined (and a class attribute ``ret``
    giving the terminal's return type).
    """
    def __init__(self):
        # Draw the value once, at creation time; rendered with repr
        # (symbolic=False) so it serializes as a literal.
        Terminal.__init__(self, self.func(), symbolic=False, ret=self.ret)
    def regen(self):
        """Regenerate the ephemeral value.
        """
        self.value = self.func()
        self.name = str(self.value)
    @staticmethod
    def func():
        """Return a random value used to define the ephemeral state.
        """
        raise NotImplementedError
class PrimitiveSetTyped(object):
    """Class that contains the primitives that can be used to solve a
    Strongly Typed GP problem. The set also defined the researched
    function return type, and input arguments type and number.
    """
    def __init__(self, name, in_types, ret_type, prefix="ARG"):
        # type -> list of nodes whose return type satisfies that type.
        self.terminals = defaultdict(list)
        self.primitives = defaultdict(list)
        self.arguments = []
        # setting "__builtins__" to None avoid the context
        # being polluted by builtins function when evaluating
        # GP expression.
        self.context = {"__builtins__" : None}
        self.mapping = dict()
        self.terms_count = 0
        self.prims_count = 0
        self.name = name
        self.ret = ret_type
        self.ins = in_types
        # Each input argument becomes a symbolic terminal named
        # "<prefix><index>" (e.g. ARG0, ARG1, ...).
        for i, type_ in enumerate(in_types):
            arg_str = "{prefix}{index}".format(prefix=prefix, index=i)
            self.arguments.append(arg_str)
            term = Terminal(arg_str, True, type_)
            self._add(term)
            self.terms_count += 1
    def renameArguments(self, **kargs):
        """Rename function arguments with new names from *kargs*.
        Keys are current argument names, values are the new names.
        """
        for i, old_name in enumerate(self.arguments):
            if old_name in kargs:
                new_name = kargs[old_name]
                self.arguments[i] = new_name
                self.mapping[new_name] = self.mapping[old_name]
                self.mapping[new_name].value = new_name
                del self.mapping[old_name]
    def _add(self, prim):
        # Register *prim* in the type-indexed dictionaries and the name
        # mapping.
        def addType(dict_, ret_type):
            if not ret_type in dict_:
                # NOTE(review): the bare access below relies on ``dict_``
                # being a defaultdict -- it materializes an (initially
                # empty) entry for ``ret_type`` as a side effect, which is
                # then seeded with nodes of compatible subtypes.
                dict_[ret_type]
                for type_, list_ in dict_.items():
                    if issubclass(type_, ret_type):
                        dict_[ret_type].extend(list_)
        addType(self.primitives, prim.ret)
        addType(self.terminals, prim.ret)
        self.mapping[prim.name] = prim
        if isinstance(prim, Primitive):
            # Also make sure every argument type has an entry.
            for type_ in prim.args:
                addType(self.primitives, type_)
                addType(self.terminals, type_)
            dict_ = self.primitives
        else:
            dict_ = self.terminals
        # Append the node under every type its return type satisfies.
        for type_ in dict_:
            if issubclass(prim.ret, type_):
                dict_[type_].append(prim)
    def addPrimitive(self, primitive, in_types, ret_type, name=None):
        """Add a primitive to the set.

        :param primitive: callable object or a function.
        :parma in_types: list of primitives arguments' type
        :param ret_type: type returned by the primitive.
        :param name: alternative name for the primitive instead
            of its __name__ attribute.
        """
        if name is None:
            name = primitive.__name__
        prim = Primitive(name, in_types, ret_type)
        assert name not in self.context or \
               self.context[name] is primitive, \
        "Primitives are required to have a unique name. " \
        "Consider using the argument 'name' to rename your "\
        "second '%s' primitive." % (name,)
        self._add(prim)
        self.context[prim.name] = primitive
        self.prims_count += 1
    def addTerminal(self, terminal, ret_type, name=None):
        """Add a terminal to the set. Terminals can be named
        using the optional *name* argument. This should be
        used : to define named constant (i.e.: pi); to speed the
        evaluation time when the object is long to build; when
        the object does not have a __repr__ functions that returns
        the code to build the object; when the object class is
        not a Python built-in.

        :param terminal: Object, or a function with no arguments.
        :param ret_type: Type of the terminal.
        :param name: defines the name of the terminal in the expression.
        """
        symbolic = False
        if name is None and callable(terminal):
            name = terminal.__name__
        assert name not in self.context, \
        "Terminals are required to have a unique name. " \
        "Consider using the argument 'name' to rename your "\
        "second %s terminal." % (name,)
        if name is not None:
            self.context[name] = terminal
            terminal = name
            symbolic = True
        elif terminal in (True, False):
            # To support True and False terminals with Python 2.
            self.context[str(terminal)] = terminal
        prim = Terminal(terminal, symbolic, ret_type)
        self._add(prim)
        self.terms_count += 1
    def addEphemeralConstant(self, name, ephemeral, ret_type):
        """Add an ephemeral constant to the set. An ephemeral constant
        is a no argument function that returns a random value. The value
        of the constant is constant for a Tree, but may differ from one
        Tree to another.

        :param name: name used to refers to this ephemeral type.
        :param ephemeral: function with no arguments returning a random value.
        :param ret_type: type of the object returned by *ephemeral*.
        """
        module_gp = globals()
        if not name in module_gp:
            # Create the Ephemeral subclass on the fly and publish it in
            # this module's namespace so pickling/deserialization work.
            class_ = type(name, (Ephemeral,), {'func' : staticmethod(ephemeral),
                                               'ret' : ret_type})
            module_gp[name] = class_
        else:
            class_ = module_gp[name]
            if issubclass(class_, Ephemeral):
                if class_.func is not ephemeral:
                    raise Exception("Ephemerals with different functions should "
                                    "be named differently, even between psets.")
                elif class_.ret is not ret_type:
                    raise Exception("Ephemerals with the same name and function "
                                    "should have the same type, even between psets.")
            else:
                raise Exception("Ephemerals should be named differently "
                                "than classes defined in the gp module.")
        self._add(class_)
        self.terms_count += 1
    def addADF(self, adfset):
        """Add an Automatically Defined Function (ADF) to the set.

        :param adfset: PrimitiveSetTyped containing the primitives with which
            the ADF can be built.
        """
        prim = Primitive(adfset.name, adfset.ins, adfset.ret)
        self._add(prim)
        self.prims_count += 1
    @property
    def terminalRatio(self):
        """Return the ratio of the number of terminals on the number of all
        kind of primitives.
        """
        # float() keeps the division exact under Python 2 as well.
        return self.terms_count / float(self.terms_count + self.prims_count)
class PrimitiveSet(PrimitiveSetTyped):
    """Untyped variant of :class:`~deap.gp.PrimitiveSetTyped`.

    Every argument and return slot uses the generic ``__type__``, so no
    type constraints are enforced on the trees.
    """

    def __init__(self, name, arity, prefix="ARG"):
        PrimitiveSetTyped.__init__(
            self, name, [__type__] * arity, __type__, prefix)

    def addPrimitive(self, primitive, arity, name=None):
        """Add primitive *primitive* with arity *arity* to the set.

        If *name* is given, it is used instead of the callable's
        ``__name__`` to identify the primitive.
        """
        assert arity > 0, "arity should be >= 1"
        in_types = [__type__] * arity
        PrimitiveSetTyped.addPrimitive(self, primitive, in_types,
                                       __type__, name)

    def addTerminal(self, terminal, name=None):
        """Add a terminal to the set."""
        PrimitiveSetTyped.addTerminal(self, terminal, __type__, name)

    def addEphemeralConstant(self, name, ephemeral):
        """Add an ephemeral constant to the set."""
        PrimitiveSetTyped.addEphemeralConstant(self, name, ephemeral,
                                               __type__)
######################################
# GP Tree compilation functions #
######################################
def compile(expr, pset):
    """Compile the expression *expr*.

    :param expr: Expression to compile. It can either be a PrimitiveTree,
        a string of Python code or any object that when
        converted into string produced a valid Python code
        expression.
    :param pset: Primitive set against which the expression is compile.
    :returns: a function if the primitive set has 1 or more arguments,
        or return the results produced by evaluating the tree.
    """
    code = str(expr)
    if len(pset.arguments) > 0:
        # This section is a stripped version of the lambdify
        # function of SymPy 0.6.6.
        args = ",".join(arg for arg in pset.arguments)
        code = "lambda {args}: {code}".format(args=args, code=code)
    try:
        # SECURITY NOTE: ``eval`` on generated code -- only evaluate
        # expressions built from trusted primitive sets.
        return eval(code, pset.context, {})
    except MemoryError:
        # Python 2 three-argument raise: re-raise with the original
        # traceback attached.
        _, _, traceback = sys.exc_info()
        raise MemoryError, ("DEAP : Error in tree evaluation :"
            " Python cannot evaluate a tree higher than 90. "
            "To avoid this problem, you should use bloat control on your "
            "operators. See the DEAP documentation for more information. "
            "DEAP will now abort."), traceback
def compileADF(expr, psets):
    """Compile the expression represented by a list of trees. The first
    element of the list is the main tree, and the following elements are
    automatically defined functions (ADF) that can be called by the first
    tree.

    :param expr: Expression to compile. It can either be a PrimitiveTree,
        a string of Python code or any object that when
        converted into string produced a valid Python code
        expression.
    :param psets: List of primitive sets. Each set corresponds to an ADF
        while the last set is associated with the expression
        and should contain reference to the preceding ADFs.
    :returns: a function if the main primitive set has 1 or more arguments,
        or return the results produced by evaluating the tree.
    """
    adfdict = {}
    func = None
    # Compile ADFs first (reversed order) so each later pset can resolve
    # the functions defined before it. NOTE: Python 2 idiom -- ``zip``
    # returns a list there, which ``reversed`` requires.
    for pset, subexpr in reversed(zip(psets, expr)):
        pset.context.update(adfdict)
        func = compile(subexpr, pset)
        adfdict.update({pset.name : func})
    # ``func`` is the compilation of the main tree (first pset/expr pair).
    return func
######################################
# GP Program generation functions #
######################################
def genFull(pset, min_, max_, type_=__type__):
    """Generate an expression where every leaf sits at the same depth,
    chosen between *min_* and *max_*.

    :param pset: Primitive set from which primitives are selected.
    :param min_: Minimum height of the produced trees.
    :param max_: Maximum height of the produced trees.
    :param type_: The type that should return the tree when called, when
                  :obj:`None` (default) no return type is enforced.
    :returns: A full tree with all leaves at the same depth.
    """
    # A branch only stops growing once the target height is reached.
    return generate(pset, min_, max_,
                    lambda height, depth: depth == height, type_)
def genGrow(pset, min_, max_, type_=__type__):
    """Generate an expression where leaves may sit at different depths
    between *min_* and *max_*.

    :param pset: Primitive set from which primitives are selected.
    :param min_: Minimum height of the produced trees.
    :param max_: Maximum height of the produced trees.
    :param type_: The type that should return the tree when called, when
                  :obj:`None` (default) no return type is enforced.
    :returns: A grown tree with leaves at possibly different depths.
    """
    def condition(height, depth):
        """Stop at the target height, or randomly (weighted by the
        terminal ratio) once the minimum depth is reached.
        """
        if depth == height:
            return True
        # Short-circuit preserved: random() is only drawn past min_.
        return depth >= min_ and random.random() < pset.terminalRatio
    return generate(pset, min_, max_, condition, type_)
def genHalfAndHalf(pset, min_, max_, type_=__type__):
    """Generate an expression with a PrimitiveSet *pset*: half the time
    via :func:`~deap.gp.genGrow`, the other half via
    :func:`~deap.gp.genFull`.

    :param pset: Primitive set from which primitives are selected.
    :param min_: Minimum height of the produced trees.
    :param max_: Maximum height of the produced trees.
    :param type_: The type that should return the tree when called, when
                  :obj:`None` (default) no return type is enforced.
    :returns: Either, a full or a grown tree.
    """
    builder = random.choice((genGrow, genFull))
    return builder(pset, min_, max_, type_)
def genRamped(pset, min_, max_, type_=__type__):
    """
    .. deprecated:: 1.0
        The function has been renamed. Use :func:`~deap.gp.genHalfAndHalf` instead.
    """
    # Deprecated alias kept only for backward compatibility; delegates
    # unchanged to genHalfAndHalf after emitting a FutureWarning.
    warnings.warn("gp.genRamped has been renamed. Use genHalfAndHalf instead.",
        FutureWarning)
    return genHalfAndHalf(pset, min_, max_, type_)
def generate(pset, min_, max_, condition, type_=__type__):
    """Generate a Tree as a list of list. The tree is build
    from the root to the leaves, and it stop growing when the
    condition is fulfilled.

    :param pset: Primitive set from which primitives are selected.
    :param min_: Minimum height of the produced trees.
    :param max_: Maximum Height of the produced trees.
    :param condition: The condition is a function that takes two arguments,
                      the height of the tree to build and the current
                      depth in the tree.
    :param type_: The type that should return the tree when called, when
                  :obj:`None` (default) no return type is enforced.
    :returns: A grown tree with leaves at possibly different depths
              dependending on the condition function.
    """
    expr = []
    height = random.randint(min_, max_)
    # Iterative depth-first construction: the stack holds
    # (depth, expected return type) pairs, and the finished tree is the
    # flat list of nodes in prefix (depth-first) order.
    stack = [(0, type_)]
    while len(stack) != 0:
        depth, type_ = stack.pop()
        if condition(height, depth):
            # Stop growing this branch: emit a terminal of the needed type.
            try:
                term = random.choice(pset.terminals[type_])
            except IndexError:
                _, _, traceback = sys.exc_info()
                # Python 2 three-expression raise: re-raise with a clearer
                # message while preserving the original traceback.
                raise IndexError, "The gp.generate function tried to add "\
                    "a terminal of type '%s', but there is "\
                    "none available." % (type_,), traceback
            if isclass(term):
                # Terminal entries may be classes; instantiate to obtain
                # a concrete value.
                term = term()
            expr.append(term)
        else:
            try:
                prim = random.choice(pset.primitives[type_])
            except IndexError:
                _, _, traceback = sys.exc_info()
                raise IndexError, "The gp.generate function tried to add "\
                    "a primitive of type '%s', but there is "\
                    "none available." % (type_,), traceback
            expr.append(prim)
            # Push argument types in reverse so they are generated
            # left-to-right when popped.
            for arg in reversed(prim.args):
                stack.append((depth+1, arg))
    return expr
######################################
# GP Crossovers #
######################################
def cxOnePoint(ind1, ind2):
    """Randomly select in each individual and exchange each subtree with the
    point as root between each individual.

    :param ind1: First tree participating in the crossover.
    :param ind2: Second tree participating in the crossover.
    :returns: A tuple of two trees.
    """
    # A subtree swap needs at least one non-root node on both sides.
    if len(ind1) < 2 or len(ind2) < 2:
        return ind1, ind2

    # Index the candidate crossover points of each tree by return type.
    types1 = defaultdict(list)
    types2 = defaultdict(list)
    if ind1.root.ret == __type__:
        # Untyped GP: every non-root node is compatible with every other.
        types1[__type__] = xrange(1, len(ind1))
        types2[__type__] = xrange(1, len(ind2))
        common_types = [__type__]
    else:
        # Strongly typed GP: bucket node indices by return type and keep
        # only the types present in both individuals.
        for pos, node in enumerate(ind1[1:], 1):
            types1[node.ret].append(pos)
        for pos, node in enumerate(ind2[1:], 1):
            types2[node.ret].append(pos)
        common_types = set(types1.keys()).intersection(set(types2.keys()))

    if len(common_types) > 0:
        type_ = random.choice(list(common_types))
        index1 = random.choice(types1[type_])
        index2 = random.choice(types2[type_])
        slice1 = ind1.searchSubtree(index1)
        slice2 = ind2.searchSubtree(index2)
        ind1[slice1], ind2[slice2] = ind2[slice2], ind1[slice1]
    return ind1, ind2
def cxOnePointLeafBiased(ind1, ind2, termpb):
    """Randomly select crossover point in each individual and exchange each
    subtree with the point as root between each individual.

    :param ind1: First typed tree participating in the crossover.
    :param ind2: Second typed tree participating in the crossover.
    :param termpb: The probability of chosing a terminal node (leaf).
    :returns: A tuple of two typed trees.

    When the nodes are strongly typed, the operator makes sure the
    second node type corresponds to the first node type.

    The parameter *termpb* sets the probability to choose between a terminal
    or non-terminal crossover point. For instance, as defined by Koza, non-
    terminal primitives are selected for 90% of the crossover points, and
    terminals for 10%, so *termpb* should be set to 0.1.
    """
    if len(ind1) < 2 or len(ind2) < 2:
        # Nothing to exchange on a single-node tree.
        return ind1, ind2

    # Independently decide, for each parent, whether the crossover point
    # will be a terminal (arity == 0) or a primitive (arity > 0).
    keep_terminal = partial(eq, 0)
    keep_primitive = partial(lt, 0)
    arity_test1 = keep_terminal if random.random() < termpb else keep_primitive
    arity_test2 = keep_terminal if random.random() < termpb else keep_primitive

    # Bucket the eligible node indices of each parent by return type.
    types1 = defaultdict(list)
    types2 = defaultdict(list)
    for pos, node in enumerate(ind1[1:], 1):
        if arity_test1(node.arity):
            types1[node.ret].append(pos)
    for pos, node in enumerate(ind2[1:], 1):
        if arity_test2(node.arity):
            types2[node.ret].append(pos)
    common_types = set(types1.keys()).intersection(set(types2.keys()))

    if len(common_types) > 0:
        # Set does not support indexing; draw one element instead.
        type_ = random.sample(common_types, 1)[0]
        index1 = random.choice(types1[type_])
        index2 = random.choice(types2[type_])
        slice1 = ind1.searchSubtree(index1)
        slice2 = ind2.searchSubtree(index2)
        ind1[slice1], ind2[slice2] = ind2[slice2], ind1[slice1]
    return ind1, ind2
######################################
# GP Mutations #
######################################
def mutUniform(individual, expr, pset):
    """Randomly select a point in the tree *individual*, then replace the
    subtree at that point as a root by the expression generated using method
    :func:`expr`.

    :param individual: The tree to be mutated.
    :param expr: A function object that can generate an expression when
                 called.
    :param pset: Primitive set forwarded to *expr*, together with the
                 return type expected at the chosen mutation point.
    :returns: A tuple of one tree.
    """
    # Pick a random node; the whole subtree rooted there is replaced.
    index = random.randrange(len(individual))
    slice_ = individual.searchSubtree(index)
    # Generate the replacement with the same return type so the tree
    # stays type-consistent.
    type_ = individual[index].ret
    individual[slice_] = expr(pset=pset, type_=type_)
    return individual,
def mutNodeReplacement(individual, pset):
    """Replaces a randomly chosen primitive from *individual* by a randomly
    chosen primitive with the same number of arguments from the :attr:`pset`
    attribute of the individual.

    :param individual: The normal or typed tree to be mutated.
    :param pset: Primitive set from which the replacement primitive or
                 terminal is selected.
    :returns: A tuple of one tree.
    """
    # A single-node tree has only the root, which is never replaced.
    if len(individual) < 2:
        return individual,

    # Never pick index 0: the root is preserved.
    index = random.randrange(1, len(individual))
    node = individual[index]

    if node.arity == 0:  # Terminal
        term = random.choice(pset.terminals[node.ret])
        if isclass(term):
            # Terminal entries may be classes; instantiate to obtain a
            # concrete value.
            term = term()
        individual[index] = term
    else:  # Primitive
        # Keep type-consistency: same return type and same argument types.
        prims = [p for p in pset.primitives[node.ret] if p.args == node.args]
        individual[index] = random.choice(prims)
    return individual,
def mutEphemeral(individual, mode):
    """This operator works on the constants of the tree *individual*. In
    *mode* ``"one"``, it will change the value of one of the individual
    ephemeral constants by calling its generator function. In *mode*
    ``"all"``, it will change the value of **all** the ephemeral constants.

    :param individual: The normal or typed tree to be mutated.
    :param mode: A string to indicate to change ``"one"`` or ``"all"``
                 ephemeral constants.
    :returns: A tuple of one tree.
    """
    if mode not in ("one", "all"):
        raise ValueError('Mode must be one of "one" or "all"')
    # Collect the positions of every ephemeral constant in the tree.
    candidates = [pos for pos, node in enumerate(individual)
                  if isinstance(node, Ephemeral)]
    if candidates:
        if mode == "one":
            # Restrict the regeneration to a single random constant.
            candidates = (random.choice(candidates),)
        for pos in candidates:
            individual[pos].regen()
    return individual,
def mutInsert(individual, pset):
    """Inserts a new branch at a random position in *individual*. The subtree
    at the chosen position is used as child node of the created subtree, in
    that way, it is really an insertion rather than a replacement. Note that
    the original subtree will become one of the children of the new primitive
    inserted, but not perforce the first (its position is randomly selected if
    the new primitive has more than one child).

    :param individual: The normal or typed tree to be mutated.
    :param pset: Primitive set from which the inserted primitive and the
                 filler terminals are selected.
    :returns: A tuple of one tree.
    """
    index = random.randrange(len(individual))
    node = individual[index]
    slice_ = individual.searchSubtree(index)

    # The inserted primitive must accept the current node's return type as
    # one of its arguments so the existing subtree can become its child.
    candidates = [p for p in pset.primitives[node.ret] if node.ret in p.args]
    if not candidates:
        return individual,

    new_node = random.choice(candidates)
    new_subtree = [None] * len(new_node.args)
    # Choose which compatible argument slot will receive the old subtree.
    position = random.choice(
        [pos for pos, a in enumerate(new_node.args) if a == node.ret])

    # Fill every other argument slot with a random terminal.
    for pos, arg_type in enumerate(new_node.args):
        if pos != position:
            term = random.choice(pset.terminals[arg_type])
            if isclass(term):
                term = term()
            new_subtree[pos] = term

    new_subtree[position:position + 1] = individual[slice_]
    new_subtree.insert(0, new_node)
    individual[slice_] = new_subtree
    return individual,
def mutShrink(individual):
    """This operator shrinks the *individual* by chosing randomly a branch and
    replacing it with one of the branch's arguments (also randomly chosen).

    :param individual: The tree to be shrinked.
    :returns: A tuple of one tree.
    """
    # We don't want to "shrink" the root
    if len(individual) < 3 or individual.height <= 1:
        return individual,
    # Candidate branches: primitives whose return type also appears among
    # their argument types, so a child subtree can legally replace them.
    iprims = []
    for i, node in enumerate(individual[1:], 1):
        if isinstance(node, Primitive) and node.ret in node.args:
            iprims.append((i, node))
    if len(iprims) != 0:
        index, prim = random.choice(iprims)
        # Pick one of the arguments whose type matches the primitive's
        # return type; that child subtree will replace the whole branch.
        arg_idx = random.choice([i for i, type_ in enumerate(prim.args) if type_ == prim.ret])
        # Walk the preceding sibling subtrees in prefix order to locate the
        # start of the chosen argument's subtree; after the loop, `subtree`
        # holds the nodes of that argument.
        rindex = index+1
        for _ in range(arg_idx+1):
            rslice = individual.searchSubtree(rindex)
            subtree = individual[rslice]
            rindex += len(subtree)
        slice_ = individual.searchSubtree(index)
        individual[slice_] = subtree
    return individual,
######################################
# GP bloat control decorators #
######################################
def staticLimit(key, max_value):
    """Implement a static limit on some measurement on a GP tree, as defined
    by Koza in [Koza1989]. It may be used to decorate both crossover and
    mutation operators. When an invalid (over the limit) child is generated,
    it is simply replaced by one of its parents, randomly selected.

    This operator can be used to avoid memory errors occuring when the tree
    gets higher than 90 levels (as Python puts a limit on the call stack
    depth), because it can ensure that no tree higher than this limit will ever
    be accepted in the population, except if it was generated at initialization
    time.

    :param key: The function to use in order the get the wanted value. For
                instance, on a GP tree, ``operator.attrgetter('height')`` may
                be used to set a depth limit, and ``len`` to set a size limit.
    :param max_value: The maximum value allowed for the given measurement.
    :returns: A decorator that can be applied to a GP operator using \
    :func:`~deap.base.Toolbox.decorate`

    .. note::
       If you want to reproduce the exact behavior intended by Koza, set
       *key* to ``operator.attrgetter('height')`` and *max_value* to 17.

    .. [Koza1989] J.R. Koza, Genetic Programming - On the Programming of
       Computers by Means of Natural Selection (MIT Press,
       Cambridge, MA, 1992)
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Keep pristine copies of the parents before the wrapped
            # operator possibly mutates them in place.
            parents = [copy.deepcopy(ind) for ind in args]
            offspring = list(func(*args, **kwargs))
            for pos, child in enumerate(offspring):
                if key(child) > max_value:
                    # Over the limit: discard the child and substitute a
                    # randomly chosen (copied) parent.
                    offspring[pos] = random.choice(parents)
            return offspring
        return wrapper
    return decorator
def graph(expr):
    """Construct the graph of a tree expression. The tree expression must be
    valid. It returns in order a node list, an edge list, and a dictionary of
    the per node labels. The node are represented by numbers, the edges are
    tuples connecting two nodes (number), and the labels are values of a
    dictionary for which keys are the node numbers.

    :param expr: A tree expression to convert into a graph.
    :returns: A node list, an edge list, and a dictionary of labels.

    The returned objects can be used directly to populate a
    `pygraphviz <http://networkx.lanl.gov/pygraphviz/>`_ graph::

        import pygraphviz as pgv

        # [...] Execution of code that produce a tree expression

        nodes, edges, labels = graph(expr)

        g = pgv.AGraph()
        g.add_nodes_from(nodes)
        g.add_edges_from(edges)
        g.layout(prog="dot")

        for i in nodes:
            n = g.get_node(i)
            n.attr["label"] = labels[i]

        g.draw("tree.pdf")

    or a `NetworX <http://networkx.github.com/>`_ graph::

        import matplotlib.pyplot as plt
        import networkx as nx

        # [...] Execution of code that produce a tree expression

        nodes, edges, labels = graph(expr)

        g = nx.Graph()
        g.add_nodes_from(nodes)
        g.add_edges_from(edges)
        pos = nx.graphviz_layout(g, prog="dot")

        nx.draw_networkx_nodes(g, pos)
        nx.draw_networkx_edges(g, pos)
        nx.draw_networkx_labels(g, pos, labels)
        plt.show()

    .. note::
       We encourage you to use `pygraphviz
       <http://networkx.lanl.gov/pygraphviz/>`_ as the nodes might be plotted
       out of order when using `NetworX <http://networkx.github.com/>`_.
    """
    nodes = range(len(expr))
    edges = []
    labels = {}

    # Stack entries are [node_index, remaining_children]; the top of the
    # stack is the parent of the node currently being visited.
    parent_stack = []
    for pos, node in enumerate(expr):
        if parent_stack:
            # Connect the current node to its parent and consume one of
            # the parent's child slots.
            edges.append((parent_stack[-1][0], pos))
            parent_stack[-1][1] -= 1
        if isinstance(node, Primitive):
            labels[pos] = node.name
        else:
            labels[pos] = node.value
        parent_stack.append([pos, node.arity])
        # Drop every completed node (no child slots left) from the stack.
        while parent_stack and parent_stack[-1][1] == 0:
            parent_stack.pop()
    return nodes, edges, labels
if __name__ == "__main__":
    # Run the module's embedded doctests when executed directly.
    import doctest
    doctest.testmod()
|
sergio-teruel/bank-payment | refs/heads/8.0 | __unported__/account_banking/banking_import_transaction.py | 13 | ##############################################################################
#
# Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>).
# (C) 2011 Therp BV (<http://therp.nl>).
# (C) 2011 Smile (<http://smile.fr>).
#
# All other contributions are (C) by their respective contributors
#
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
from openerp.osv import orm, fields
from openerp import netsvc
from openerp.tools.translate import _
from openerp.addons.decimal_precision import decimal_precision as dp
from openerp.addons.account_banking.parsers import models
from openerp.addons.account_banking.parsers import convert
from openerp.addons.account_banking.wizard import banktools
# Short alias for the parser's mem_bank_transaction class.
bt = models.mem_bank_transaction
class banking_import_transaction(orm.Model):
""" orm representation of mem_bank_transaction() for interactive and posthoc
configuration of reconciliation in the bank statement view.
Possible refractoring in OpenERP 6.1:
merge with bank_statement_line, using sparse fields
"""
_name = 'banking.import.transaction'
_description = 'Bank import transaction'
_rec_name = 'transaction'
# This variable is used to match supplier invoices with an invoice date
# after the real payment date. This can occur with online transactions
# (web shops).
# TODO: Convert this to a proper configuration variable
payment_window = datetime.timedelta(days=10)
    def _match_costs(self, cr, uid, trans, period_id, account_info, log):
        '''
        Get or create a costs invoice for the bank and return it with
        the payment as seen in the transaction (when not already done).

        :param trans: browse record of the import transaction.
        :param period_id: id of the accounting period for the costs invoice.
        :param account_info: bank account settings record; provides the
            costs account, bank partner, company and invoice journal.
        :param log: list of log message strings (part of the common matcher
            signature; not written to in this method).
        :returns: list of reconcilable move lines of the costs invoice,
            or an empty list when no costs account is configured.
        '''
        if not account_info.costs_account_id:
            return []
        digits = dp.get_precision('Account')(cr)[1]
        amount = round(abs(trans.statement_line_id.amount), digits)
        # Make sure to be able to pinpoint our costs invoice for later
        # matching
        reference = '%s.%s: %s' % (trans.statement,
                                   trans.transaction,
                                   trans.reference)
        # search supplier invoice
        invoice_obj = self.pool.get('account.invoice')
        invoice_ids = invoice_obj.search(cr, uid, [
            '&',
            ('type', '=', 'in_invoice'),
            ('partner_id', 'child_of', account_info.bank_partner_id.id),
            ('company_id', '=', account_info.company_id.id),
            ('date_invoice', '=', trans.execution_date),
            ('reference', '=', reference),
            ('amount_total', '=', amount),
        ]
        )
        if invoice_ids and len(invoice_ids) == 1:
            invoice = invoice_obj.browse(cr, uid, invoice_ids)[0]
        elif not invoice_ids:
            # create supplier invoice
            partner_obj = self.pool.get('res.partner')
            invoice_lines = [(0, 0, dict(
                amount=1,
                price_unit=amount,
                name=trans.message or trans.reference,
                account_id=account_info.costs_account_id.id
            ))]
            invoice_address_id = partner_obj.address_get(
                cr, uid, [account_info.bank_partner_id.id], ['invoice']
            )
            invoice_id = invoice_obj.create(cr, uid, dict(
                type='in_invoice',
                company_id=account_info.company_id.id,
                partner_id=account_info.bank_partner_id.id,
                address_invoice_id=invoice_address_id['invoice'],
                period_id=period_id,
                journal_id=account_info.invoice_journal_id.id,
                account_id=(
                    account_info.bank_partner_id.property_account_payable.id),
                date_invoice=trans.execution_date,
                reference_type='none',
                reference=reference,
                name=trans.reference or trans.message,
                check_total=amount,
                invoice_line=invoice_lines,
            ))
            invoice = invoice_obj.browse(cr, uid, invoice_id)
            # Create workflow
            invoice_obj.button_compute(cr, uid, [invoice_id],
                                       {'type': 'in_invoice'}, set_total=True)
            wf_service = netsvc.LocalService('workflow')
            # Move to state 'open'
            wf_service.trg_validate(uid, 'account.invoice', invoice.id,
                                    'invoice_open', cr)
        # NOTE(review): if the search above returns more than one invoice,
        # 'invoice' is never assigned and the return below raises a
        # NameError — confirm whether duplicate costs invoices can occur.
        # return move_lines to mix with the rest
        return [x for x in invoice.move_id.line_id if x.account_id.reconcile]
    def _match_invoice(self, cr, uid, trans, move_lines,
                       partner_ids, bank_account_ids,
                       log, linked_invoices,
                       context=None):
        '''
        Find the invoice belonging to this reference - if there is one
        Use the sales journal to check.

        :param trans: browse record of the import transaction to match.
        :param move_lines: candidate receivable/payable move lines.
        :param partner_ids: ids of partners already matched to this
            transaction (may be empty).
        :param bank_account_ids: browse records of known partner bank
            accounts, used to pick the account of the matched partner.
        :param log: list of message strings appended to for the user.
        :param linked_invoices: cache dict (move_line id -> remaining
            amount) shared across transactions of one import run.

        Challenges we're facing:
            1. The sending or receiving party is not necessarily the same as
               the partner the payment relates to.
            2. References can be messed up during manual encoding and inexact
               matching can link the wrong invoices.
            3. Amounts can or can not match the expected amount.
            4. Multiple invoices can be paid in one transaction.
            .. There are countless more, but these we'll try to address.

        Assumptions for matching:
            1. There are no payments for invoices not sent. These are dealt
               with later on.
            2. Debit amounts are either customer invoices or credited supplier
               invoices.
            3. Credit amounts are either supplier invoices or credited customer
               invoices.
            4. Payments are either below expected amount or only slightly above
               (abs).
            5. Payments from partners that are matched, pay their own invoices.

        Worst case scenario:
            1. No match was made.
               No harm done. Proceed with manual matching as usual.
            2. The wrong match was made.
               Statements are encoded in draft. You will have the opportunity
               to manually correct the wrong assumptions.

        TODO: REVISE THIS DOC
        #Return values:
        # old_trans: this function can modify and rebrowse the modified
        # transaction.
        # move_info: the move_line information belonging to the matched
        # invoice
        # new_trans: the new transaction when the current one was split.
        # This can happen when multiple invoices were paid with a single
        # bank transaction.
        '''

        def eyecatcher(invoice):
            '''
            Return the eyecatcher for an invoice
            '''
            if invoice.type.startswith('in_'):
                return invoice.name or invoice.number
            else:
                return invoice.number

        def has_id_match(invoice, ref, msg):
            '''
            Aid for debugging - way more comprehensible than complex
            comprehension filters ;-)

            Match on ID of invoice (reference, name or number, whatever
            available and sensible)
            '''
            if invoice.reference and len(invoice.reference) > 2:
                # Reference always comes first, as it is manually set for a
                # reason.
                iref = invoice.reference.upper()
                if iref in ref or iref in msg:
                    return True
            if invoice.origin and len(invoice.origin) > 2:
                iorigin = invoice.origin.upper()
                if iorigin in ref or iorigin in msg:
                    return True
            if invoice.type.startswith('in_'):
                # Internal numbering, no likely match on number
                if invoice.name and len(invoice.name) > 2:
                    iname = invoice.name.upper()
                    if iname in ref or iname in msg:
                        return True
                if (invoice.supplier_invoice_number
                        and len(invoice.supplier_invoice_number) > 2):
                    supp_ref = invoice.supplier_invoice_number.upper()
                    if supp_ref in ref or supp_ref in msg:
                        return True
            elif invoice.type.startswith('out_'):
                # External id's possible and likely
                inum = invoice.number.upper()
                if inum in ref or inum in msg:
                    return True
            return False

        def _cached(move_line):
            # Disabled, we allow for multiple matches in
            # the interactive wizard
            return False
            # '''Check if the move_line has been cached'''
            # return move_line.id in linked_invoices

        def _cache(move_line, remaining=0.0):
            '''Cache the move_line'''
            linked_invoices[move_line.id] = remaining

        def _remaining(move_line):
            '''Return the remaining amount for a previously matched move_line
            '''
            return linked_invoices[move_line.id]

        def _sign(invoice):
            '''Return the direction of an invoice'''
            return {
                'in_invoice': -1,
                'in_refund': 1,
                'out_invoice': 1,
                'out_refund': -1
            }[invoice.type]

        def is_zero(move_line, total):
            return self.pool.get('res.currency').is_zero(
                cr, uid, trans.statement_id.currency, total)

        digits = dp.get_precision('Account')(cr)[1]
        partial = False

        # Search invoice on partner
        if partner_ids:
            candidates = [
                x for x in move_lines
                if x.partner_id.id in partner_ids and
                (convert.str2date(x.date, '%Y-%m-%d') <=
                 (convert.str2date(trans.execution_date, '%Y-%m-%d') +
                  self.payment_window))
                and (not _cached(x) or _remaining(x))
            ]
        else:
            candidates = []

        # Next on reference/invoice number. Mind that this uses the invoice
        # itself, as the move_line references have been fiddled with on invoice
        # creation. This also enables us to search for the invoice number in
        # the reference instead of the other way around, as most human
        # interventions *add* text.
        ref = trans.reference.upper()
        msg = trans.message.upper()
        if len(candidates) > 1 or not candidates:
            # The manual usage of the sales journal creates moves that
            # are not tied to invoices. Thanks to Stefan Rijnhart for
            # reporting this.
            candidates = [
                x for x in candidates or move_lines
                if (x.invoice and has_id_match(x.invoice, ref, msg) and
                    convert.str2date(x.invoice.date_invoice, '%Y-%m-%d') <=
                    (convert.str2date(trans.execution_date, '%Y-%m-%d') +
                     self.payment_window)
                    and (not _cached(x) or _remaining(x))
                    and (not partner_ids
                         or x.invoice.partner_id.id in partner_ids))
            ]

        # Match on amount expected. Limit this kind of search to known
        # partners.
        if not candidates and partner_ids:
            candidates = [
                x for x in move_lines
                if (is_zero(x.move_id, ((x.debit or 0.0) - (x.credit or 0.0)) -
                            trans.statement_line_id.amount)
                    and convert.str2date(x.date, '%Y-%m-%d') <=
                    (convert.str2date(trans.execution_date, '%Y-%m-%d') +
                     self.payment_window)
                    and (not _cached(x) or _remaining(x))
                    and x.partner_id.id in partner_ids)
            ]

        move_line = False
        if candidates and len(candidates) > 0:
            # Now a possible selection of invoices has been found, check the
            # amounts expected and received.
            #
            # TODO: currency coercing
            best = [
                x for x in candidates
                if (is_zero(x.move_id, ((x.debit or 0.0) - (x.credit or 0.0)) -
                            trans.statement_line_id.amount)
                    and convert.str2date(x.date, '%Y-%m-%d') <=
                    (convert.str2date(trans.execution_date, '%Y-%m-%d') +
                     self.payment_window))
            ]
            if len(best) == 1:
                # Exact match
                move_line = best[0]
                invoice = move_line.invoice
                if _cached(move_line):
                    partial = True
                    expected = _remaining(move_line)
                else:
                    _cache(move_line)
            elif len(candidates) > 1:
                # Before giving up, check cache for catching duplicate
                # transfers first
                paid = [
                    x for x in move_lines
                    if x.invoice and has_id_match(x.invoice, ref, msg)
                    and convert.str2date(x.invoice.date_invoice, '%Y-%m-%d')
                    <= convert.str2date(trans.execution_date, '%Y-%m-%d')
                    and (_cached(x) and not _remaining(x))
                ]
                if paid:
                    log.append(
                        _('Unable to link transaction id %(trans)s '
                          '(ref: %(ref)s) to invoice: '
                          'invoice %(invoice)s was already paid') % {
                              'trans': '%s.%s' % (trans.statement,
                                                  trans.transaction),
                              'ref': trans.reference,
                              'invoice': eyecatcher(paid[0].invoice)})
                else:
                    # Multiple matches
                    # TODO select best bank account in this case
                    return (trans, self._get_move_info(
                        cr, uid, [x.id for x in candidates]),
                        False)
                move_line = False
                partial = False
            elif len(candidates) == 1 and candidates[0].invoice:
                # Mismatch in amounts
                move_line = candidates[0]
                invoice = move_line.invoice
                expected = round(_sign(invoice) * invoice.residual, digits)
                partial = True

        trans2 = None
        if move_line and partial:
            found = round(trans.statement_line_id.amount, digits)
            if abs(expected) == abs(found):
                partial = False
                # Last partial payment will not flag invoice paid without
                # manual assistence
                # Stefan: disabled this here for the interactive method
                # Handled this with proper handling of partial
                # reconciliation and the workflow service
                # invoice_obj = self.pool.get('account.invoice')
                # invoice_obj.write(cr, uid, [invoice.id], {
                #     'state': 'paid'
                # })
            elif abs(expected) > abs(found):
                # Partial payment, reuse invoice
                _cache(move_line, expected - found)
        if move_line:
            account_ids = [
                x.id for x in bank_account_ids
                if x.partner_id.id == move_line.partner_id.id
            ]
            return (trans, self._get_move_info(
                cr, uid, [move_line.id],
                account_ids and account_ids[0] or False),
                trans2)

        return trans, False, False
    def _confirm_move(self, cr, uid, transaction_id, context=None):
        """
        The line is matched against a move (invoice), so generate a payment
        voucher with the write-off settings that the user requested. The move
        lines will be generated by the voucher, handling rounding and currency
        conversion.

        :param transaction_id: id of a single banking.import.transaction.
        :raises orm.except_orm: when the transaction has no selected
            move line to reconcile against.
        """
        if context is None:
            context = {}
        statement_line_pool = self.pool.get('account.bank.statement.line')
        transaction = self.browse(cr, uid, transaction_id, context)
        if not transaction.move_line_id:
            if transaction.match_type == 'invoice':
                raise orm.except_orm(
                    _("Cannot link transaction %s with invoice") %
                    transaction.statement_line_id.name,
                    (transaction.invoice_ids and
                     (_("Please select one of the matches in transaction "
                        "%s.%s") or
                      _("No match found for transaction %s.%s")) % (
                          transaction.statement_line_id.statement_id.name,
                          transaction.statement_line_id.name
                      )))
            else:
                raise orm.except_orm(
                    _("Cannot link transaction %s with accounting entry") %
                    transaction.statement_line_id.name,
                    (transaction.move_line_ids and
                     (_("Please select one of the matches in transaction "
                        "%s.%s") or
                      _("No match found for transaction %s.%s")) % (
                          transaction.statement_line_id.statement_id.name,
                          transaction.statement_line_id.name
                      )))
        st_line = transaction.statement_line_id
        journal = st_line.statement_id.journal_id
        # Negative amounts are outgoing payments, positive amounts receipts.
        if st_line.amount < 0.0:
            voucher_type = 'payment'
            account_id = (journal.default_debit_account_id and
                          journal.default_debit_account_id.id or False)
        else:
            voucher_type = 'receipt'
            account_id = (journal.default_credit_account_id and
                          journal.default_credit_account_id.id or False)
        # Use the statement line's date determine the period
        ctxt = context.copy()
        ctxt['company_id'] = st_line.company_id.id
        if 'period_id' in ctxt:
            del ctxt['period_id']
        period_id = self.pool.get('account.period').find(
            cr, uid, st_line.date, context=ctxt)[0]
        # Convert the move line amount to the journal currency
        move_line_amount = transaction.move_line_id.amount_residual_currency
        to_curr_id = (st_line.statement_id.journal_id.currency and
                      st_line.statement_id.journal_id.currency.id or
                      st_line.statement_id.company_id.currency_id.id)
        from_curr_id = (transaction.move_line_id.currency_id and
                        transaction.move_line_id.currency_id.id or
                        st_line.statement_id.company_id.currency_id.id)
        if from_curr_id != to_curr_id:
            amount_currency = statement_line_pool._convert_currency(
                cr, uid, from_curr_id, to_curr_id, move_line_amount,
                round=True, date=transaction.move_line_id.date,
                context=context)
        else:
            amount_currency = move_line_amount
        # Check whether this is a full or partial reconciliation
        if transaction.payment_option == 'with_writeoff':
            writeoff = abs(st_line.amount) - abs(amount_currency)
            line_amount = abs(amount_currency)
        else:
            writeoff = 0.0
            line_amount = abs(st_line.amount)
        # Define the voucher
        voucher = {
            'journal_id': st_line.statement_id.journal_id.id,
            'partner_id': (
                st_line.partner_id and st_line.partner_id.id or False),
            'company_id': st_line.company_id.id,
            'type': voucher_type,
            'account_id': account_id,
            'amount': abs(st_line.amount),
            'writeoff_amount': writeoff,
            'payment_option': transaction.payment_option,
            'writeoff_acc_id': transaction.writeoff_account_id.id,
            'analytic_id': transaction.writeoff_analytic_id.id or False,
            'date': st_line.date,
            'date_due': st_line.date,
            'period_id': period_id,
            'payment_rate_currency_id': to_curr_id,
        }
        # Define the voucher line
        vch_line = {
            # 'voucher_id': v_id,
            'move_line_id': transaction.move_line_id.id,
            'reconcile': True,
            'amount': line_amount,
            'account_id': transaction.move_line_id.account_id.id,
            'type': transaction.move_line_id.credit and 'dr' or 'cr',
        }
        voucher['line_ids'] = [(0, 0, vch_line)]
        voucher_id = self.pool.get('account.voucher').create(
            cr, uid, voucher, context=context)
        # Link the voucher to the statement line and reload the record.
        statement_line_pool.write(
            cr, uid, st_line.id,
            {'voucher_id': voucher_id}, context=context)
        transaction.refresh()
    def _legacy_do_move_unreconcile(self, cr, uid, move_line_ids, currency,
                                    context=None):
        """
        Legacy method. Allow for canceling bank statement lines that
        were confirmed using earlier versions of the interactive wizard branch.

        Undo a reconciliation, removing the given move line ids. If no
        meaningful (partial) reconciliation remains, delete it.

        :param move_line_ids: List of ids. This will usually be the move
        line of an associated invoice or payment, plus optionally the
        move line of a writeoff.
        :param currency: A res.currency *browse* object to perform math
        operations on the amounts.
        :returns: True.
        """
        move_line_obj = self.pool.get('account.move.line')
        reconcile_obj = self.pool.get('account.move.reconcile')
        is_zero = lambda amount: self.pool.get('res.currency').is_zero(
            cr, uid, currency, amount)
        move_lines = move_line_obj.browse(cr, uid, move_line_ids,
                                          context=context)
        # Full and partial reconciliations are stored on different fields.
        reconcile = (move_lines[0].reconcile_id
                     or move_lines[0].reconcile_partial_id)
        line_ids = [
            x.id
            for x in reconcile.line_id or reconcile.line_partial_ids
        ]
        # Remove the lines to be unreconciled from the reconciliation.
        for move_line_id in move_line_ids:
            line_ids.remove(move_line_id)
        if len(line_ids) > 1:
            # More than one line remains: keep the reconciliation, as a
            # full one when the remaining lines balance to zero, otherwise
            # as a partial one.
            full = is_zero(move_line_obj.get_balance(cr, uid, line_ids))
            if full:
                line_partial_ids = []
            else:
                line_partial_ids = list(line_ids)
                line_ids = []
            reconcile_obj.write(
                cr, uid, reconcile.id,
                {'line_partial_ids': [(6, 0, line_partial_ids)],
                 'line_id': [(6, 0, line_ids)],
                 }, context=context)
        else:
            # Nothing meaningful remains: drop the reconciliation entirely.
            reconcile_obj.unlink(cr, uid, reconcile.id, context=context)
        for move_line in move_lines:
            if move_line.invoice:
                # reopening the invoice
                netsvc.LocalService('workflow').trg_validate(
                    uid, 'account.invoice', move_line.invoice.id, 'undo_paid',
                    cr
                )
        return True
def _legacy_clear_up_writeoff(self, cr, uid, transaction, context=None):
"""
Legacy method to support upgrades older installations of the
interactive wizard branch. To be removed after 7.0
clear up the writeoff move
"""
if transaction.writeoff_move_line_id:
move_pool = self.pool.get('account.move')
move_pool.button_cancel(
cr, uid, [transaction.writeoff_move_line_id.move_id.id],
context=context)
move_pool.unlink(
cr, uid, [transaction.writeoff_move_line_id.move_id.id],
context=context)
return True
    def _legacy_cancel_move(self, cr, uid, transaction, context=None):
        """
        Legacy method to support upgrades from older installations
        of the interactive wizard branch.

        Undo the reconciliation of a transaction with a move line
        in the system: Retrieve the move line from the bank statement line's
        move that is reconciled with the matching move line recorded
        on the transaction. Do not actually remove the latter from the
        reconciliation, as it may be further reconciled.

        Unreconcile the bank statement move line and the optional
        write-off move line

        :param transaction: browse record of the transaction to cancel.
        """
        statement_line_obj = self.pool.get('account.bank.statement.line')
        currency = transaction.statement_line_id.statement_id.currency
        # The matched move line's (partial) reconciliation identifies which
        # of the statement move's lines belongs to this transaction.
        reconcile_id = (
            transaction.move_line_id.reconcile_id and
            transaction.move_line_id.reconcile_id.id or
            transaction.move_line_id.reconcile_partial_id and
            transaction.move_line_id.reconcile_partial_id.id
        )
        move_lines = []
        for move in transaction.statement_line_id.move_ids:
            move_lines += move.line_id
        # NOTE(review): if no statement move line shares the reconciliation,
        # 'st_line_line' below is never bound and the code raises a
        # NameError — confirm this state cannot occur for legacy entries.
        for line in move_lines:
            line_reconcile = line.reconcile_id or line.reconcile_partial_id
            if line_reconcile and line_reconcile.id == reconcile_id:
                st_line_line = line
                break
        line_ids = [st_line_line.id]
        # Add the write off line
        if transaction.writeoff_move_line_id:
            line_ids.append(transaction.writeoff_move_line_id.id)
        self._legacy_do_move_unreconcile(
            cr, uid, line_ids, currency, context=context)
        statement_line_obj.write(
            cr, uid, transaction.statement_line_id.id,
            {'reconcile_id': False}, context=context)
def _cancel_voucher(self, cr, uid, transaction_id, context=None):
    # Cancel (and delete) the voucher behind a matched statement
    # line, reopen the related paid invoice, and fall back to the
    # legacy move cancellation for pre-voucher reconciliations.
    # :param transaction_id: id of one banking.import.transaction
    voucher_pool = self.pool.get('account.voucher')
    transaction = self.browse(cr, uid, transaction_id, context=context)
    st_line = transaction.statement_line_id
    if transaction.match_type:
        if st_line.voucher_id:
            # Although vouchers can be associated with statement lines
            # in standard OpenERP, we consider ourselves owner of the
            # voucher if the line has an associated transaction
            # Upon canceling of the statement line/transaction,
            # we cancel and delete the vouchers.
            # Otherwise, the statement line will leave the voucher
            # unless the statement line itself is deleted.
            voucher_pool.cancel_voucher(
                cr, uid, [st_line.voucher_id.id], context=context)
            # Vouchers can only be unlinked in draft state, hence
            # the intermediate action_cancel_draft step
            voucher_pool.action_cancel_draft(
                cr, uid, [st_line.voucher_id.id], context=context)
            voucher_pool.unlink(
                cr, uid, [st_line.voucher_id.id], context=context)
        if (transaction.move_line_id
                and transaction.move_line_id.invoice):
            # reopening the invoice
            netsvc.LocalService('workflow').trg_validate(
                uid, 'account.invoice',
                transaction.move_line_id.invoice.id, 'undo_paid', cr)
    # Allow canceling of legacy entries
    if not st_line.voucher_id and st_line.reconcile_id:
        self._legacy_cancel_move(cr, uid, transaction, context=context)
    return True
# Dispatch table: cancellation routine per match type. The values
# are plain functions at class-definition time, so callers pass the
# model instance explicitly as the first argument.
cancel_map = {
    'invoice': _cancel_voucher,
    'manual': _cancel_voucher,
    'move': _cancel_voucher,
}
def cancel(self, cr, uid, ids, context=None):
    """
    Cancel the matched transactions: dispatch to the cancellation
    routine registered for each transaction's match type, then
    clean up any legacy write-off move.

    :raises orm.except_orm: when a transaction has a match type
        with no registered cancellation routine.
    """
    if ids and isinstance(ids, (int, float)):
        ids = [ids]
    for transaction in self.browse(cr, uid, ids, context):
        if not transaction.match_type:
            continue
        if transaction.match_type not in self.cancel_map:
            # Interpolate *after* the translation call, so the
            # untranslated template can be looked up in the
            # translation tables (the original interpolated inside
            # _(), which always misses the lookup).
            raise orm.except_orm(
                _("Cannot cancel type %s") % transaction.match_type,
                _("No method found to cancel this type"))
        self.cancel_map[transaction.match_type](
            self, cr, uid, transaction.id, context)
        self._legacy_clear_up_writeoff(
            cr, uid, transaction, context=context
        )
    return True
# Dispatch table: confirmation (reconciliation) routine per match
# type; _confirm_move is defined earlier in this class. Values are
# plain functions, so the model instance is passed explicitly.
confirm_map = {
    'invoice': _confirm_move,
    'manual': _confirm_move,
    'move': _confirm_move,
}
def confirm(self, cr, uid, ids, context=None):
    """
    Confirm (reconcile) the matched transactions by dispatching to
    the confirmation routine registered for each match type.

    :raises orm.except_orm: when no confirmation routine is
        registered for a transaction's match type, or when a
        write-off is requested for an unsupported match type.
    """
    if ids and isinstance(ids, (int, float)):
        ids = [ids]
    for transaction in self.browse(cr, uid, ids, context):
        if not transaction.match_type:
            continue
        if transaction.match_type not in self.confirm_map:
            raise orm.except_orm(
                _("Cannot reconcile"),
                _("Cannot reconcile type %s. No method found to " +
                  "reconcile this type") %
                transaction.match_type
            )
        if (transaction.residual and transaction.writeoff_account_id):
            # NOTE(review): this tuple currently mirrors the keys of
            # confirm_map, so the branch below is unreachable; it
            # only matters if the two ever diverge.
            if transaction.match_type not in ('invoice', 'move', 'manual'):
                raise orm.except_orm(
                    _("Cannot reconcile"),
                    _("Bank transaction %s: write off not implemented for "
                      "this match type.") %
                    transaction.statement_line_id.name
                )
        # Generalize this bit and move to the confirmation
        # methods that actually do create a voucher?
        self.confirm_map[transaction.match_type](
            self, cr, uid, transaction.id, context)
    return True
# Fields that together identify a transaction for duplicate
# detection in create() (transferred_amount is compared separately
# with currency rounding).
signal_duplicate_keys = [
    # does not include float values
    # such as transferred_amount
    'execution_date', 'local_account', 'remote_account',
    'remote_owner', 'reference', 'message',
]
def create(self, cr, uid, vals, context=None):
    """
    Create the transaction, then search for duplicates of it and
    mark the new record as such when one is found, unless a context
    key 'transaction_no_duplicate_search' is defined and true.
    """
    res = super(banking_import_transaction, self).create(
        cr, uid, vals, context)
    # Guard against a missing context: the original called
    # context.get() directly and crashed with an AttributeError
    # when invoked with the default context=None.
    if res and not (context or {}).get(
            'transaction_no_duplicate_search'):
        me = self.browse(cr, uid, res, context)
        search_vals = [(key, '=', me[key])
                       for key in self.signal_duplicate_keys]
        ids = self.search(cr, uid, search_vals, context=context)
        dupes = []
        # Test for transferred_amount separately
        # due to float representation and rounding difficulties
        for trans in self.browse(cr, uid, ids, context=context):
            if self.pool.get('res.currency').is_zero(
                    cr, uid,
                    trans.statement_id.currency,
                    me['transferred_amount'] - trans.transferred_amount):
                dupes.append(trans.id)
        # The freshly created record must always match itself
        if len(dupes) < 1:
            raise orm.except_orm(_('Cannot check for duplicate'),
                                 _("Cannot check for duplicate. "
                                   "I can't find myself."))
        if len(dupes) > 1:
            self.write(
                cr, uid, res, {'duplicate': True}, context=context)
    return res
def split_off(self, cr, uid, res_id, amount, context=None):
    # Placeholder: split 'amount' off from transaction res_id into
    # a new transaction. Not implemented.
    # todo. Inherit the duplicate marker from res_id
    pass
def combine(self, cr, uid, ids, context=None):
    # Placeholder: merge the transactions in ids into one.
    # Not implemented.
    # todo. Check equivalence of primary key
    pass
def _get_move_info(self, cr, uid, move_line_ids, partner_bank_id=False,
                   partial=False, match_type=False, context=None):
    """
    Build a move_info dictionary describing a candidate match over
    the given move lines.

    partner_id, account_id and match_type are only retained when
    ALL move lines agree on the value; any disagreement — or a line
    without a value while one has already been recorded — resets
    the value to False.

    :param move_line_ids: ids of candidate account.move.line records
    :param partner_bank_id: optional bank account id to record
    :param partial: unused in this method — presumably consumed by
        callers/overrides; TODO confirm
    :param match_type: preset match type, refined below
    :returns: dict with partner, bank account, reference, voucher
        type, match type, account id and, for invoice matches, the
        matched invoice ids
    """
    # Map invoice types to voucher types
    type_map = {
        'out_invoice': 'customer',
        'in_invoice': 'supplier',
        'out_refund': 'customer',
        'in_refund': 'supplier',
    }
    retval = {'partner_id': False,
              'partner_bank_id': partner_bank_id,
              'reference': False,
              'type': 'general',
              'move_line_ids': move_line_ids,
              'match_type': match_type,
              'account_id': False,
              }
    move_lines = self.pool.get('account.move.line').browse(
        cr, uid, move_line_ids, context=context
    )
    # Consensus partner: keep only if all lines share one partner
    for move_line in move_lines:
        if move_line.partner_id:
            if retval['partner_id']:
                if retval['partner_id'] != move_line.partner_id.id:
                    retval['partner_id'] = False
                    break
            else:
                retval['partner_id'] = move_line.partner_id.id
        else:
            if retval['partner_id']:
                retval['partner_id'] = False
                break
    # Consensus account: keep only if all lines share one account
    for move_line in move_lines:
        if move_line.account_id:
            if retval['account_id']:
                if retval['account_id'] != move_line.account_id.id:
                    retval['account_id'] = False
                    break
            else:
                retval['account_id'] = move_line.account_id.id
        else:
            if retval['account_id']:
                retval['account_id'] = False
                break
    # Match type is 'invoice' only when every line has an invoice
    for move_line in move_lines:
        if move_line.invoice:
            if retval['match_type']:
                if retval['match_type'] != 'invoice':
                    retval['match_type'] = False
                    break
            else:
                retval['match_type'] = 'invoice'
        else:
            if retval['match_type']:
                retval['match_type'] = False
                break
    # Mixed or invoice-less lines fall back to a plain move match
    if move_lines and not retval['match_type']:
        retval['match_type'] = 'move'
    if move_lines and len(move_lines) == 1:
        retval['reference'] = move_lines[0].ref
    if retval['match_type'] == 'invoice':
        retval['invoice_ids'] = list(set(x.invoice.id for x in move_lines))
        retval['type'] = type_map[move_lines[0].invoice.type]
    return retval
def move_info2values(self, move_info):
    """
    Translate a move_info dictionary (as produced by
    _get_move_info) into values to write on a
    banking.import.transaction record.
    """
    vals = {
        'match_type': move_info['match_type'],
        'move_line_ids': [(6, 0, move_info.get('move_line_ids') or [])],
        'invoice_ids': [(6, 0, move_info.get('invoice_ids') or [])],
    }
    # A single candidate move line is selected outright
    line_ids = move_info.get('move_line_ids', False)
    if not line_ids:
        vals['move_line_id'] = line_ids
    elif len(line_ids) == 1:
        vals['move_line_id'] = line_ids[0]
    else:
        vals['move_line_id'] = False
    if move_info['match_type'] == 'invoice':
        # Likewise, a single candidate invoice is selected outright
        invoice_ids = move_info.get('invoice_ids', False)
        if not invoice_ids:
            vals['invoice_id'] = invoice_ids
        elif len(invoice_ids) == 1:
            vals['invoice_id'] = invoice_ids[0]
        else:
            vals['invoice_id'] = False
    return vals
def hook_match_payment(self, cr, uid, transaction, log, context=None):
    """
    To override in module 'account_banking_payment'

    :param log: mutable list of log messages an override may append to
    :returns: a move_info dictionary when a matching payment is
        found; False by default here.
    """
    return False
def match(self, cr, uid, ids, results=None, context=None):
    """
    Match the given transactions against the system: create a bank
    statement line for each transaction that does not have one yet,
    split off provision costs into an injected child transaction,
    and try to match the transaction to payments, invoices or open
    move lines. Counters and log messages are accumulated in the
    (optional) 'results' dictionary.

    Fix: the currency-mismatch log message below used
    'transactions.local_account' (the browse *list*), which raised
    an AttributeError whenever that error path was taken; it now
    reads the attribute from the current 'transaction'.
    """
    if not ids:
        return True
    company_obj = self.pool.get('res.company')
    partner_bank_obj = self.pool.get('res.partner.bank')
    journal_obj = self.pool.get('account.journal')
    move_line_obj = self.pool.get('account.move.line')
    payment_line_obj = self.pool.get('payment.line')
    has_payment = bool(
        payment_line_obj and 'date_done' in payment_line_obj._columns)
    statement_line_obj = self.pool.get('account.bank.statement.line')
    statement_obj = self.pool.get('account.bank.statement')
    imported_statement_ids = []
    # Results
    if results is None:
        results = dict(
            trans_loaded_cnt=0,
            trans_skipped_cnt=0,
            trans_matched_cnt=0,
            bank_costs_invoice_cnt=0,
            error_cnt=0,
            log=[],
        )
    # Caching
    error_accounts = {}
    info = {}
    linked_payments = {}
    # TODO: harvest linked invoices from draft statement lines?
    linked_invoices = {}
    payment_lines = []
    # Get all unreconciled sent payment lines in one big swoop.
    # No filtering can be done, as empty dates carry value for C2B
    # communication. Most likely there are much less sent payments
    # than reconciled and open/draft payments.
    # Strangely, payment_orders still do not have company_id
    if has_payment:
        payment_line_ids = payment_line_obj.search(
            cr, uid, [
                ('order_id.state', '=', 'sent'),
                ('date_done', '=', False)
            ], context=context)
        payment_lines = payment_line_obj.browse(
            cr, uid, payment_line_ids)
    # Start the loop over the transactions requested to match
    transactions = self.browse(cr, uid, ids, context)
    # TODO: do we do injected transactions here?
    injected = []
    i = 0
    max_trans = len(transactions)
    while i < max_trans:
        move_info = False
        if injected:
            # Force FIFO behavior
            transaction = injected.pop(0)
        else:
            transaction = transactions[i]
        if transaction.local_account in error_accounts:
            results['trans_skipped_cnt'] += 1
            if not injected:
                i += 1
            continue
        partner_banks = []
        partner_ids = []
        # TODO: optimize by ordering transactions per company,
        # and perform the stanza below only once per company.
        # In that case, take newest transaction date into account
        # when retrieving move_line_ids below.
        company = company_obj.browse(
            cr, uid, transaction.company_id.id, context)
        # Get interesting journals once
        # Added type 'general' to capture fund transfers
        journal_ids = journal_obj.search(
            cr, uid, [
                ('type', 'in', ('general', 'sale', 'purchase',
                                'purchase_refund', 'sale_refund')),
                ('company_id', '=', company.id),
            ],
        )
        # Get all unreconciled moves
        move_line_ids = move_line_obj.search(
            cr, uid, [
                ('reconcile_id', '=', False),
                ('journal_id', 'in', journal_ids),
                ('account_id.reconcile', '=', True),
                ('date', '<=', transaction.execution_date),
            ],
        )
        if move_line_ids:
            move_lines = move_line_obj.browse(cr, uid, move_line_ids)
        else:
            move_lines = []
        # Create fallback currency code
        currency_code = (transaction.local_currency
                         or company.currency_id.name)
        # Check cache for account info/currency
        if transaction.local_account in info and \
                currency_code in info[transaction.local_account]:
            account_info = info[transaction.local_account][currency_code]
        else:
            # Pull account info/currency
            account_info = banktools.get_company_bank_account(
                self.pool, cr, uid, transaction.local_account,
                transaction.local_currency, company, results['log']
            )
            if not account_info:
                results['log'].append(
                    _('Transaction found for unknown account '
                      '%(bank_account)s') %
                    {'bank_account': transaction.local_account}
                )
                error_accounts[transaction.local_account] = True
                results['error_cnt'] += 1
                if not injected:
                    i += 1
                continue
            if 'journal_id' not in account_info.keys():
                results['log'].append(
                    _('Transaction found for account %(bank_account)s, '
                      'but no default journal was defined.'
                      ) % {'bank_account': transaction.local_account}
                )
                error_accounts[transaction.local_account] = True
                results['error_cnt'] += 1
                if not injected:
                    i += 1
                continue
            # Get required currency code
            currency_code = account_info.currency_id.name
            # Cache results
            if transaction.local_account not in info:
                info[transaction.local_account] = {
                    currency_code: account_info
                }
            else:
                info[transaction.local_account][currency_code] = (
                    account_info
                )
        # Link accounting period
        period_id = banktools.get_period(
            self.pool, cr, uid, transaction.execution_date,
            company, results['log'])
        if not period_id:
            results['trans_skipped_cnt'] += 1
            if not injected:
                i += 1
            continue
        if transaction.statement_line_id:
            if transaction.statement_line_id.state == 'confirmed':
                raise orm.except_orm(
                    _("Cannot perform match"),
                    _("Cannot perform match on a confirmed transaction"))
        else:
            # No statement line yet: create one from the transaction
            values = {
                'name': '%s.%s' % (transaction.statement,
                                   transaction.transaction),
                'date': transaction.execution_date,
                'amount': transaction.transferred_amount,
                'statement_id': transaction.statement_id.id,
                'note': transaction.message,
                'ref': transaction.reference,
                'period_id': period_id,
                'currency': account_info.currency_id.id,
                'import_transaction_id': transaction.id,
                'account_id': (
                    transaction.transferred_amount < 0 and
                    account_info.default_credit_account_id.id or
                    account_info.default_debit_account_id.id),
            }
            statement_line_id = statement_line_obj.create(
                cr, uid, values, context
            )
            results['trans_loaded_cnt'] += 1
            transaction.write({'statement_line_id': statement_line_id})
            transaction.refresh()
        if transaction.statement_id.id not in imported_statement_ids:
            imported_statement_ids.append(transaction.statement_id.id)
        # Final check: no coercion of currencies!
        if transaction.local_currency \
                and account_info.currency_id.name != \
                transaction.local_currency:
            # TODO: convert currencies?
            results['log'].append(
                _('transaction %(statement_id)s.%(transaction_id)s for '
                  'account %(bank_account)s uses different currency than '
                  'the defined bank journal.'
                  ) % {
                      # was: transactions.local_account (AttributeError)
                      'bank_account': transaction.local_account,
                      'statement_id': transaction.statement,
                      'transaction_id': transaction.transaction,
                  }
            )
            error_accounts[transaction.local_account] = True
            results['error_cnt'] += 1
            if not injected:
                i += 1
            continue
        # When bank costs are part of transaction itself, split it.
        if (transaction.type != bt.BANK_COSTS
                and transaction.provision_costs):
            # Create new transaction for bank costs
            cost_id = self.copy(
                cr, uid, transaction.id,
                dict(
                    type=bt.BANK_COSTS,
                    transaction='%s-prov' % transaction.transaction,
                    transferred_amount=transaction.provision_costs,
                    remote_currency=transaction.provision_costs_currency,
                    message=transaction.provision_costs_description,
                    parent_id=transaction.id,
                ), context)
            injected.append(self.browse(cr, uid, cost_id, context))
            # Remove bank costs from current transaction
            # Note that this requires that the transferred_amount
            # includes the bank costs and that the costs itself are
            # signed correctly.
            self.write(
                cr, uid, transaction.id,
                dict(
                    transferred_amount=(
                        transaction.transferred_amount -
                        transaction.provision_costs),
                    provision_costs=False,
                    provision_costs_currency=False,
                    provision_costs_description=False,
                ), context=context)
            # rebrowse the current record after writing
            transaction = self.browse(
                cr, uid, transaction.id, context=context
            )
        # Match payment and direct debit orders
        move_info_payment = self.hook_match_payment(
            cr, uid, transaction, results['log'], context=context)
        if move_info_payment:
            move_info = move_info_payment
        # Allow inclusion of generated bank invoices
        if transaction.type == bt.BANK_COSTS:
            lines = self._match_costs(
                cr, uid, transaction, period_id, account_info,
                results['log']
            )
            results['bank_costs_invoice_cnt'] += bool(lines)
            for line in lines:
                if not [x for x in move_lines if x.id == line.id]:
                    move_lines.append(line)
            partner_ids = [account_info.bank_partner_id.id]
        else:
            # Link remote partner, import account when needed
            partner_banks = banktools.get_bank_accounts(
                self.pool, cr, uid, transaction.remote_account,
                results['log'], fail=True
            )
            if partner_banks:
                partner_ids = [x.partner_id.id for x in partner_banks]
            elif transaction.remote_owner:
                country_id = banktools.get_country_id(
                    self.pool, cr, uid, transaction, context=context)
                partner_id = banktools.get_partner(
                    self.pool, cr, uid, transaction.remote_owner,
                    transaction.remote_owner_address,
                    transaction.remote_owner_postalcode,
                    transaction.remote_owner_city,
                    country_id, results['log'],
                    context=context)
                if partner_id:
                    partner_ids = [partner_id]
                    if transaction.remote_account:
                        partner_bank_id = banktools.create_bank_account(
                            self.pool, cr, uid, partner_id,
                            transaction.remote_account,
                            transaction.remote_owner,
                            transaction.remote_owner_address,
                            transaction.remote_owner_city,
                            country_id, bic=transaction.remote_bank_bic,
                            context=context)
                        partner_banks = partner_bank_obj.browse(
                            cr, uid, [partner_bank_id], context=context)
        # Credit means payment... isn't it?
        if (not move_info
                and transaction.statement_line_id.amount < 0
                and payment_lines):
            # Link open payment - if any
            # Note that _match_payment is defined in the
            # account_banking_payment module which should be installed
            # automatically if account_payment is. And if account_payment
            # is not installed, then payment_lines will be empty.
            move_info = self._match_payment(
                cr, uid, transaction,
                payment_lines, partner_ids,
                partner_banks, results['log'], linked_payments,
            )
        # Second guess, invoice -> may split transaction, so beware
        if not move_info:
            # Link invoice - if any. Although bank costs are not an
            # invoice, automatic invoicing on bank costs will create
            # these, and invoice matching still has to be done.
            transaction, move_info, remainder = self._match_invoice(
                cr, uid, transaction, move_lines, partner_ids,
                partner_banks, results['log'], linked_invoices,
                context=context)
            if remainder:
                injected.append(self.browse(cr, uid, remainder, context))
        account_id = move_info and move_info.get('account_id', False)
        if not account_id:
            # Use the default settings, but allow individual partner
            # settings to overrule this.
            bank_partner = (
                partner_banks[0].partner_id if len(partner_banks) == 1
                else False)
            if transaction.statement_line_id.amount < 0:
                if bank_partner:
                    account_id = bank_partner.\
                        def_journal_account_bank_decr()[bank_partner.id]
                else:
                    account_id = account_info.default_credit_account_id.id
            else:
                if bank_partner:
                    account_id = bank_partner.\
                        def_journal_account_bank_incr()[bank_partner.id]
                else:
                    account_id = account_info.default_debit_account_id.id
        values = {'account_id': account_id}
        self_values = {}
        if move_info:
            results['trans_matched_cnt'] += 1
            self_values.update(
                self.move_info2values(move_info))
            # values['match_type'] = move_info['match_type']
            values['partner_id'] = move_info['partner_id']
            values['partner_bank_id'] = move_info['partner_bank_id']
            values['type'] = move_info['type']
        else:
            values['partner_id'] = values['partner_bank_id'] = False
        if (not values['partner_id']
                and partner_ids
                and len(partner_ids) == 1):
            values['partner_id'] = partner_ids[0]
        if (not values['partner_bank_id']
                and partner_banks
                and len(partner_banks) == 1):
            values['partner_bank_id'] = partner_banks[0].id
        statement_line_obj.write(
            cr, uid, transaction.statement_line_id.id, values, context)
        self.write(cr, uid, transaction.id, self_values, context)
        if not injected:
            i += 1
    # recompute statement end_balance for validation
    if imported_statement_ids:
        statement_obj.button_dummy(
            cr, uid, imported_statement_ids, context=context)
def _get_residual(self, cr, uid, ids, name, args, context=None):
    """
    Calculate the residual against the candidate reconciliation.

    Example (translated from the original Dutch note):
        55 receivable, 50 received: amount > 0, residual > 0
        -55 payable, -50 paid: residual < 0

    When
    - residual > 0 and transferred amount > 0, or
    - residual < 0 and transferred amount < 0
    the result is a partial reconciliation. In the other cases,
    a new statement line can be split off.
    We should give users the option to reconcile with writeoff
    or partial reconciliation / new statement line
    """
    if not ids:
        return {}
    res = dict([(x, False) for x in ids])
    for transaction in self.browse(cr, uid, ids, context):
        # 0.0 is a meaningful amount, hence the explicit 'is False'
        # test instead of a truthiness check
        if (transaction.statement_line_id.state == 'draft' and
                not(transaction.move_currency_amount is False)):
            res[transaction.id] = (
                transaction.move_currency_amount -
                transaction.statement_line_id.amount
            )
    return res
def _get_match_multi(self, cr, uid, ids, name, args, context=None):
    """
    Function field getter: True for transactions where several
    candidate matches were found and the user has not yet chosen
    between them.
    """
    if not ids:
        return {}
    res = dict.fromkeys(ids, False)
    for this in self.browse(cr, uid, ids, context):
        undecided = False
        if this.match_type == 'move':
            undecided = bool(
                this.move_line_ids and not this.move_line_id)
        elif this.match_type == 'invoice':
            undecided = bool(
                this.invoice_ids and not this.invoice_id)
        if undecided:
            res[this.id] = True
    return res
def clear_and_write(self, cr, uid, ids, vals=None, context=None):
    """
    Reset all match related fields to their empty values first,
    then apply the values in 'vals' on top of that.
    """
    write_vals = {
        'match_type': False,
        'move_line_id': False,
        'invoice_id': False,
        # many2many fields are emptied with a (6, 0, []) command
        'move_line_ids': [(6, 0, [])],
        'invoice_ids': [(6, 0, [])],
    }
    if vals:
        write_vals.update(vals)
    return self.write(cr, uid, ids, write_vals, context=context)
def _get_move_amount(self, cr, uid, ids, name, args, context=None):
    """
    Need to get the residual amount on the move (invoice) in the bank
    statement currency.
    This will be used to calculate the write-off amount
    (in statement currency).
    """
    if not ids:
        return {}
    res = dict([(x, False) for x in ids])
    stline_pool = self.pool.get('account.bank.statement.line')
    for transaction in self.browse(cr, uid, ids, context):
        if transaction.move_line_id:
            move_line_amount = (
                transaction.move_line_id.amount_residual_currency)
            statement = transaction.statement_line_id.statement_id
            # Statement currency: the journal currency when set,
            # otherwise the company currency
            to_curr_id = (
                statement.journal_id.currency
                and statement.journal_id.currency.id
                or statement.company_id.currency_id.id
            )
            # Move line currency, falling back to company currency
            from_curr_id = (
                transaction.move_line_id.currency_id
                and transaction.move_line_id.currency_id.id
                or statement.company_id.currency_id.id
            )
            if from_curr_id != to_curr_id:
                amount_currency = stline_pool._convert_currency(
                    cr, uid, from_curr_id, to_curr_id, move_line_amount,
                    round=True, date=transaction.statement_line_id.date,
                    context=context
                )
            else:
                amount_currency = move_line_amount
            # NOTE(review): the sign is recovered from the move
            # line's own amounts, which assumes the residual value
            # above is unsigned here — confirm against the ORM's
            # amount_residual_currency semantics.
            sign = 1
            if transaction.move_line_id.currency_id:
                if transaction.move_line_id.amount_currency < 0:
                    sign = -1
            else:
                if (transaction.move_line_id.debit
                        - transaction.move_line_id.credit) < 0:
                    sign = -1
            res[transaction.id] = sign * amount_currency
    return res
def unlink(self, cr, uid, ids, context=None):
    """
    Unsplit if this is a split transaction: give the split-off
    amount back to the parent before deleting the record.
    """
    for transaction in self.browse(cr, uid, ids, context):
        parent = transaction.parent_id
        if parent:
            restored_amount = (
                parent.transferred_amount +
                transaction.transferred_amount)
            parent.write({'transferred_amount': restored_amount})
            parent.refresh()
    return super(banking_import_transaction, self).unlink(
        cr, uid, ids, context=context)
# Mapping of model column names to parser attribute names;
# used in bank_import.py, converting non-osv transactions
column_map = {
    'statement_id': 'statement',
    'id': 'transaction'
}
_columns = {
    # start mem_bank_transaction atributes
    # see parsers/models.py
    'transaction': fields.char('transaction', size=16), # id
    'statement': fields.char('statement', size=16), # statement_id
    'type': fields.char('type', size=16),
    'reference': fields.char('reference', size=1024),
    'local_account': fields.char('local_account', size=24),
    'local_currency': fields.char('local_currency', size=16),
    'execution_date': fields.date('Posted date'),
    'value_date': fields.date('Value date'),
    'remote_account': fields.char('remote_account', size=24),
    'remote_currency': fields.char('remote_currency', size=16),
    'exchange_rate': fields.float('exchange_rate'),
    'transferred_amount': fields.float('transferred_amount'),
    'message': fields.char('message', size=1024),
    'remote_owner': fields.char('remote_owner', size=128),
    'remote_owner_address': fields.char('remote_owner_address', size=256),
    'remote_owner_city': fields.char('remote_owner_city', size=128),
    'remote_owner_postalcode': fields.char(
        'remote_owner_postalcode',
        size=24,
    ),
    'remote_owner_country_code': fields.char(
        'remote_owner_country_code',
        size=24,
    ),
    'remote_owner_custno': fields.char('remote_owner_custno', size=24),
    'remote_bank_bic': fields.char('remote_bank_bic', size=24),
    'remote_bank_bei': fields.char('remote_bank_bei', size=24),
    'remote_bank_ibei': fields.char('remote_bank_ibei', size=24),
    # NOTE(review): key 'remote_bank_eangl' vs label
    # 'remote_bank_eangln' — one of the two looks misspelled;
    # confirm before renaming anything.
    'remote_bank_eangl': fields.char('remote_bank_eangln', size=24),
    'remote_bank_chips_uid': fields.char('remote_bank_chips_uid', size=24),
    'remote_bank_duns': fields.char('remote_bank_duns', size=24),
    'remote_bank_tax_id': fields.char('remote_bank_tax_id', size=24),
    # NOTE(review): 'size' has no effect on fields.float
    'provision_costs': fields.float('provision_costs', size=24),
    'provision_costs_currency': fields.char(
        'provision_costs_currency',
        size=64,
    ),
    'provision_costs_description': fields.char(
        'provision_costs_description',
        size=24,
    ),
    'error_message': fields.char('error_message', size=1024),
    'storno_retry': fields.boolean('storno_retry'),
    # end of mem_bank_transaction_fields
    'bank_country_code': fields.char(
        'Bank country code', size=2,
        help=("Fallback default country for new partner records, "
              "as defined by the import parser"),
        readonly=True,),
    'company_id': fields.many2one(
        'res.company', 'Company', required=True),
    # Set in create() when another transaction shares the
    # signal_duplicate_keys values and the same amount
    'duplicate': fields.boolean('duplicate'),
    'statement_line_id': fields.many2one(
        'account.bank.statement.line', 'Statement line',
        ondelete='cascade'),
    # NOTE(review): 'CASCADE' here vs lowercase 'cascade' above —
    # inconsistent casing, though both spell the same SQL action.
    'statement_id': fields.many2one(
        'account.bank.statement', 'Statement',
        ondelete='CASCADE'),
    'parent_id': fields.many2one(
        'banking.import.transaction', 'Split off from this transaction'),
    # match fields
    'match_type': fields.selection([
        ('move', 'Move'),
        ('invoice', 'Invoice'),
        ('payment', 'Payment line'),
        ('payment_order', 'Payment order'),
        ('storno', 'Storno'),
        ('manual', 'Manual'),
        ('payment_manual', 'Payment line (manual)'),
        ('payment_order_manual', 'Payment order (manual)'),
    ], 'Match type'),
    'match_multi': fields.function(
        _get_match_multi, method=True, string='Multi match',
        type='boolean'),
    # Candidate matches vs the single selected match
    'move_line_ids': fields.many2many(
        'account.move.line', 'banking_transaction_move_line_rel',
        'move_line_id', 'transaction_id', 'Matching entries'),
    'move_line_id': fields.many2one(
        'account.move.line', 'Entry to reconcile'),
    'invoice_ids': fields.many2many(
        'account.invoice', 'banking_transaction_invoice_rel',
        'invoice_id', 'transaction_id', 'Matching invoices'),
    'invoice_id': fields.many2one(
        'account.invoice', 'Invoice to reconcile'),
    'residual': fields.function(
        _get_residual, method=True, string='Residual', type='float'),
    'writeoff_account_id': fields.many2one(
        'account.account',
        'Write-off account',
        domain=[('type', '!=', 'view')]
    ),
    'payment_option': fields.selection(
        [
            ('without_writeoff', 'Keep Open'),
            ('with_writeoff', 'Reconcile Payment Balance')
        ],
        'Payment Difference',
        required=True,
        help=("This field helps you to choose what you want to do with "
              "the eventual difference between the paid amount and the "
              "sum of allocated amounts. You can either choose to keep "
              "open this difference on the partner's account, "
              "or reconcile it with the payment(s)"),
    ),
    'writeoff_amount': fields.float('Difference Amount'),
    # Legacy field: to be removed after 7.0
    'writeoff_move_line_id': fields.many2one(
        'account.move.line', 'Write off move line'),
    'writeoff_analytic_id': fields.many2one(
        'account.analytic.account', 'Write off analytic account'),
    'move_currency_amount': fields.function(
        _get_move_amount,
        method=True,
        string='Match Amount',
        type='float',
    ),
}
_defaults = {
    # NOTE(review): the model name passed to _company_default_get is
    # 'bank.import.transaction', while relations elsewhere in this
    # file use 'banking.import.transaction' — confirm which string
    # is the model's actual _name before changing it.
    'company_id': lambda s, cr, uid, c:
    s.pool.get('res.company')._company_default_get(
        cr, uid, 'bank.import.transaction', context=c),
    'payment_option': 'without_writeoff',
}
class account_bank_statement_line(orm.Model):
_inherit = 'account.bank.statement.line'
def _get_link_partner_ok(
        self, cr, uid, ids, name, args, context=None):
    """
    Function field getter: whether the 'link partner' wizard should
    be shown on the bank statement line.
    """
    res = {}
    for line in self.browse(cr, uid, ids, context):
        # Only draft lines without a partner but with a remote
        # account on the import transaction qualify
        can_link = (
            line.state == 'draft'
            and not line.partner_id
            and line.import_transaction_id
            and line.import_transaction_id.remote_account)
        res[line.id] = bool(can_link)
    return res
_columns = {
    'import_transaction_id': fields.many2one(
        'banking.import.transaction',
        'Import transaction', readonly=True, ondelete='cascade'),
    # The fields below mirror the matching state of the underlying
    # import transaction for display on the statement line
    'match_multi': fields.related(
        'import_transaction_id', 'match_multi', type='boolean',
        string='Multi match', readonly=True),
    'residual': fields.related(
        'import_transaction_id', 'residual', type='float',
        string='Residual', readonly=True,
    ),
    'duplicate': fields.related(
        'import_transaction_id', 'duplicate', type='boolean',
        string='Possible duplicate import', readonly=True),
    'match_type': fields.related(
        'import_transaction_id', 'match_type', type='selection',
        selection=[
            ('move', 'Move'),
            ('invoice', 'Invoice'),
            ('payment', 'Payment line'),
            ('payment_order', 'Payment order'),
            ('storno', 'Storno'),
            ('manual', 'Manual'),
            ('payment_manual', 'Payment line (manual)'),
            ('payment_order_manual', 'Payment order (manual)'),
        ],
        string='Match type', readonly=True,),
    # Workflow state of the line itself; see confirm() and cancel()
    'state': fields.selection(
        [('draft', 'Draft'), ('confirmed', 'Confirmed')], 'State',
        readonly=True, required=True),
    # Set when this line was split off from another line
    'parent_id': fields.many2one(
        'account.bank.statement.line',
        'Parent',
    ),
    'link_partner_ok': fields.function(
        _get_link_partner_ok, type='boolean',
        string='Can link partner'),
}
# New statement lines start in the draft workflow state
_defaults = {
    'state': 'draft',
}
def match_wizard(self, cr, uid, ids, context=None):
    """
    Open the transaction matching wizard for the first statement
    line in ids.

    :returns: an act_window dictionary for the wizard, or False
        when ids is empty.
    """
    res = False
    if ids:
        if isinstance(ids, (int, float)):
            ids = [ids]
        if context is None:
            context = {}
        # The wizard reads the line to act on from the context
        context['statement_line_id'] = ids[0]
        wizard_obj = self.pool.get('banking.transaction.wizard')
        res_id = wizard_obj.create(
            cr, uid, {'statement_line_id': ids[0]}, context=context)
        res = wizard_obj.create_act_window(
            cr, uid, res_id, context=context
        )
    return res
def link_partner(self, cr, uid, ids, context=None):
    """
    Get the appropriate partner or fire a wizard to create
    or link one

    Tries, in order: the partner already on the line, the partner
    of the line's bank account, a partner linked manually on
    another line with the same remote account, and finally the
    'banking.link_partner' wizard.
    """
    if not ids:
        return False
    if isinstance(ids, (int, long)):
        ids = [ids]
    # Check if the partner is already known but not shown
    # because the screen was not refreshed yet
    statement_line = self.browse(
        cr, uid, ids[0], context=context)
    if statement_line.partner_id:
        return True
    # Reuse the bank's partner if any
    if (statement_line.partner_bank_id and
            statement_line.partner_bank_id.partner_id):
        statement_line.write(
            {'partner_id': statement_line.partner_bank_id.partner_id.id})
        return True
    if (not statement_line.import_transaction_id
            or not statement_line.import_transaction_id.remote_account):
        raise orm.except_orm(
            _("Error"),
            _("No bank account available to link partner to"))
    # Check if the bank account was already been linked
    # manually to another transaction
    remote_account = statement_line.import_transaction_id.remote_account
    source_line_ids = self.search(
        cr, uid,
        [('import_transaction_id.remote_account', '=', remote_account),
         ('partner_bank_id.partner_id', '!=', False),
         ], limit=1, context=context)
    if source_line_ids:
        source_line = self.browse(
            cr, uid, source_line_ids[0], context=context)
        # Propagate the partner to all other draft lines with the
        # same remote account and no bank account yet
        target_line_ids = self.search(
            cr, uid,
            [('import_transaction_id.remote_account', '=', remote_account),
             ('partner_bank_id', '=', False),
             ('state', '=', 'draft')], context=context)
        self.write(
            cr, uid, target_line_ids,
            {'partner_bank_id': source_line.partner_bank_id.id,
             'partner_id': source_line.partner_bank_id.partner_id.id,
             }, context=context)
        return True
    # Or fire the wizard to link partner and account
    wizard_obj = self.pool.get('banking.link_partner')
    res_id = wizard_obj.create(
        cr, uid, {'statement_line_id': ids[0]}, context=context)
    return wizard_obj.create_act_window(cr, uid, res_id, context=context)
def _convert_currency(
        self, cr, uid, from_curr_id, to_curr_id, from_amount,
        round=False, date=None, context=None):
    """Convert currency amount using the company rate on a specific date"""
    # Work on a copy so the caller's context is not mutated
    ctxt = dict(context) if context else {}
    if date:
        ctxt["date"] = date
    currency_obj = self.pool.get('res.currency')
    return currency_obj.compute(
        cr, uid, from_curr_id, to_curr_id, from_amount,
        round=round, context=ctxt)
def confirm(self, cr, uid, ids, context=None):
    """
    Create (or update) a voucher for each statement line, and then generate
    the moves by posting the voucher.
    If a line does not have a move line against it, but has an account,
    then generate a journal entry that moves the line amount to the
    specified account.

    :raises orm.except_orm: when a line is flagged as a duplicate,
        or when an analytic account is set without an analytic
        journal on the statement's journal.
    """
    statement_pool = self.pool.get('account.bank.statement')
    obj_seq = self.pool.get('ir.sequence')
    import_transaction_obj = self.pool.get('banking.import.transaction')
    for st_line in self.browse(cr, uid, ids, context):
        # Only draft lines can be confirmed
        if st_line.state != 'draft':
            continue
        if st_line.duplicate:
            raise orm.except_orm(
                _('Bank transfer flagged as duplicate'),
                _("You cannot confirm a bank transfer marked as a "
                  "duplicate (%s.%s)") %
                (st_line.statement_id.name, st_line.name,))
        if st_line.analytic_account_id:
            if not st_line.statement_id.journal_id.analytic_journal_id:
                raise orm.except_orm(
                    _('No Analytic Journal !'),
                    _("You have to define an analytic journal on the '%s' "
                      "journal!") % st_line.statement_id.journal_id.name
                )
        if not st_line.amount:
            continue
        if not st_line.period_id:
            self.write(
                cr, uid, [st_line.id], {
                    'period_id': self._get_period(
                        cr, uid, date=st_line.date, context=context)
                })
            st_line.refresh()
        # Generate the statement number, if it is not already done
        st = st_line.statement_id
        if not st.name == '/':
            st_number = st.name
        else:
            if st.journal_id.sequence_id:
                period = st.period_id or st_line.period_id
                c = {'fiscalyear_id': period.fiscalyear_id.id}
                st_number = obj_seq.next_by_id(
                    cr, uid, st.journal_id.sequence_id.id, context=c
                )
            else:
                st_number = obj_seq.next_by_code(
                    cr, uid, 'account.bank.statement'
                )
            statement_pool.write(
                cr, uid, [st.id], {'name': st_number}, context=context
            )
        # Confirm the underlying import transaction first; this may
        # create the reconciliation/voucher behind the line
        if st_line.import_transaction_id:
            import_transaction_obj.confirm(
                cr, uid, st_line.import_transaction_id.id, context)
        st_line.refresh()
        st_line_number = statement_pool.get_next_st_line_number(
            cr, uid, st_number, st_line, context)
        company_currency_id = st.journal_id.company_id.currency_id.id
        statement_pool.create_move_from_st_line(
            cr, uid, st_line.id, company_currency_id, st_line_number,
            context
        )
        self.write(
            cr, uid, st_line.id, {'state': 'confirmed'}, context)
    return True
def cancel(self, cr, uid, ids, context=None):
if ids and isinstance(ids, (int, float)):
ids = [ids]
import_transaction_obj = self.pool.get('banking.import.transaction')
move_pool = self.pool.get('account.move')
set_draft_ids = []
move_unlink_ids = []
# harvest ids for various actions
for st_line in self.browse(cr, uid, ids, context):
if st_line.state != 'confirmed':
continue
if st_line.statement_id.state != 'draft':
raise orm.except_orm(
_("Cannot cancel bank transaction"),
_("The bank statement that this transaction belongs to "
"has already been confirmed"))
if st_line.import_transaction_id:
# Cancel transaction immediately.
# If it has voucher, this will clean up
# the moves on the st_line.
import_transaction_obj.cancel(
cr, uid, [st_line.import_transaction_id.id],
context=context
)
st_line.refresh()
for line in st_line.move_ids:
# We allow for people canceling and removing
# the associated payments, which can lead to confirmed
# statement lines without an associated move
move_unlink_ids.append(line.id)
set_draft_ids.append(st_line.id)
move_pool.button_cancel(
cr, uid, move_unlink_ids, context=context)
move_pool.unlink(cr, uid, move_unlink_ids, context=context)
self.write(
cr, uid, set_draft_ids, {'state': 'draft'}, context=context)
return True
def unlink(self, cr, uid, ids, context=None):
"""
Don't allow deletion of a confirmed statement line
If this statement line comes from a split transaction, give the
amount back
"""
if type(ids) is int:
ids = [ids]
for line in self.browse(cr, uid, ids, context=context):
if line.state == 'confirmed':
raise orm.except_orm(
_('Confirmed Statement Line'),
_("You cannot delete a confirmed Statement Line"
": '%s'") % line.name)
if line.parent_id:
line.parent_id.write(
{
'amount': line.parent_id.amount + line.amount,
}
)
line.parent_id.refresh()
return super(account_bank_statement_line, self).unlink(
cr, uid, ids, context=context)
    def create_instant_transaction(self, cr, uid, ids, context=None):
        """
        Check for existance of import transaction on the
        bank statement lines. Create instant items if appropriate.
        This way, the matching wizard works on manually
        encoded statements.
        The transaction is only filled with the most basic
        information. The use of the transaction at this point
        is rather to store matching data rather than to
        provide data about the transaction which have all been
        transferred to the bank statement line.
        """
        import_transaction_pool = self.pool.get('banking.import.transaction')
        # Accept a single id as well as a list of ids
        if ids and isinstance(ids, (int, long)):
            ids = [ids]
        if context is None:
            context = {}
        # Suppress duplicate detection while creating the placeholder
        # transaction; it carries no distinguishing data yet and would
        # otherwise be flagged against existing transactions.
        localcontext = context.copy()
        localcontext['transaction_no_duplicate_search'] = True
        for line in self.browse(cr, uid, ids, context=context):
            # Only unconfirmed lines that have no transaction yet qualify
            if line.state != 'confirmed' and not line.import_transaction_id:
                res = import_transaction_pool.create(
                    cr, uid, {
                        'company_id': line.statement_id.company_id.id,
                        'statement_line_id': line.id,
                        },
                    context=localcontext)
                self.write(
                    cr, uid, line.id, {
                        'import_transaction_id': res},
                    context=context)
    def split_off(self, cr, uid, ids, amount, context=None):
        """
        Create a child statement line with amount, deduce that from this line,
        change transactions accordingly

        :param amount: the amount (in statement currency) to move from each
            line in ``ids`` onto a newly created child line
        :returns: list of the newly created child statement line ids
        """
        if context is None:
            context = {}
        transaction_pool = self.pool.get('banking.import.transaction')
        child_statement_ids = []
        for this in self.browse(cr, uid, ids, context):
            # Duplicate the import transaction for the child line, marked
            # as a split of the original
            transaction_data = transaction_pool.copy_data(
                cr, uid, this.import_transaction_id.id
            )
            transaction_data['transferred_amount'] = amount
            transaction_data['message'] = ((transaction_data['message'] or '')
                                           + _(' (split)'))
            transaction_data['parent_id'] = this.import_transaction_id.id
            # Skip duplicate detection: the copy would trivially match
            # its own original
            transaction_id = transaction_pool.create(
                cr,
                uid,
                transaction_data,
                context=dict(context, transaction_no_duplicate_search=True)
            )
            # Duplicate the statement line itself and link it to the new
            # transaction and to this (parent) line
            statement_line_data = self.copy_data(cr, uid, this.id)
            statement_line_data['amount'] = amount
            statement_line_data['name'] = (
                (statement_line_data['name'] or '') + _(' (split)'))
            statement_line_data['import_transaction_id'] = transaction_id
            statement_line_data['parent_id'] = this.id
            statement_line_id = self.create(
                cr, uid, statement_line_data, context=context)
            child_statement_ids.append(statement_line_id)
            # Back-link the transaction to the child line it now belongs to
            transaction_pool.write(
                cr, uid, transaction_id, {
                    'statement_line_id': statement_line_id,
                    }, context=context)
            # Deduct the split amount from the parent line; unlink() of the
            # child gives it back
            this.write({'amount': this.amount - amount})
        return child_statement_ids
class account_bank_statement(orm.Model):
    """
    Extend bank statements so that confirmation, cancellation and
    deletion are delegated to the per-line workflow implemented on
    account.bank.statement.line above.
    """
    _inherit = 'account.bank.statement'
    def _end_balance(self, cr, uid, ids, name, attr, context=None):
        """
        This method taken from account/account_bank_statement.py and
        altered to take the statement line subflow into account
        """
        res = {}
        statements = self.browse(cr, uid, ids, context=context)
        for statement in statements:
            res[statement.id] = statement.balance_start
            # Calculate the balance based on the statement line amounts
            # ..they are in the statement currency, no conversion needed.
            for line in statement.line_ids:
                res[statement.id] += line.amount
        for r in res:
            res[r] = round(res[r], 2)
        return res
    def button_confirm_bank(self, cr, uid, ids, context=None):
        """ Inject the statement line workflow here """
        if context is None:
            context = {}
        line_obj = self.pool.get('account.bank.statement.line')
        for st in self.browse(cr, uid, ids, context=context):
            j_type = st.journal_id.type
            if not self.check_status_condition(
                    cr, uid, st.state, journal_type=j_type):
                continue
            self.balance_check(cr, uid, st.id, journal_type=j_type,
                               context=context)
            if (not st.journal_id.default_credit_account_id) \
                    or (not st.journal_id.default_debit_account_id):
                raise orm.except_orm(
                    _('Configuration Error !'),
                    _('Please verify that an account is defined in the '
                      'journal.')
                )
            # protect against misguided manual changes
            for line in st.move_line_ids:
                if line.state != 'valid':
                    raise orm.except_orm(
                        _('Error !'),
                        _('The account entries lines are not in valid state.')
                    )
            # Delegate move creation to the statement line workflow
            line_obj.confirm(cr, uid, [line.id for line in st.line_ids],
                             context)
            st.refresh()
            self.message_post(
                cr, uid, [st.id],
                body=_('Statement %s confirmed, journal items were created.')
                % (st.name,), context=context)
        return self.write(cr, uid, ids, {'state': 'confirm'}, context=context)
    def button_cancel(self, cr, uid, ids, context=None):
        """
        Do nothing but write the state. Delegate all actions to the statement
        line workflow instead.
        """
        # NOTE(review): unlike the upstream method this returns None;
        # callers appear not to rely on the return value.
        self.write(cr, uid, ids, {'state': 'draft'}, context=context)
    def unlink(self, cr, uid, ids, context=None):
        """
        Don't allow deletion of statement with confirmed bank statement lines.
        """
        if type(ids) is int:
            ids = [ids]
        for st in self.browse(cr, uid, ids, context=context):
            for line in st.line_ids:
                if line.state == 'confirmed':
                    raise orm.except_orm(
                        _('Confirmed Statement Lines'),
                        _("You cannot delete a Statement with confirmed "
                          "Statement Lines: '%s'") % st.name)
        return super(account_bank_statement, self).unlink(
            cr, uid, ids, context=context)
    _columns = {
        # override this field *only* to replace the
        # function method with the one from this module.
        # Note that it is defined twice, both in
        # account/account_bank_statement.py (without 'store') and
        # account/account_cash_statement.py (with store=True)
        'balance_end': fields.function(
            _end_balance, method=True, store=True, string='Balance'),
        }
|
GaetanCambier/CouchPotatoServer | refs/heads/develop | couchpotato/core/media/_base/providers/nzb/binnewz/nzbclub.py | 20 | from bs4 import BeautifulSoup
from nzbdownloader import NZBDownloader
from nzbdownloader import NZBGetURLSearchResult
from couchpotato.core.helpers.encoding import toUnicode,tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.media._base.providers.nzb.base import NZBProvider
from dateutil.parser import parse
import time
log = CPLog(__name__)
class NZBClub(NZBDownloader, NZBProvider, RSS):
    """NZB search provider backed by the nzbclub.com RSS feed."""
    urls = {
        'search': 'http://www.nzbclub.com/nzbfeeds.aspx?%s',
    }
    http_time_between_calls = 4 #seconds
    def search(self, filename, minSize, newsgroup=None):
        # Build the nzbclub feed query; 'qq' restricts to a newsgroup,
        # 'rpp' is results per page.
        q = filename
        params = tryUrlencode({
            'q': q,
            'qq': newsgroup,
            'ig': 1,
            'rpp': 200,
            'st': 5,
            'sp': 1,
            'ns': 1,
        })
        nzbs = self.getRSSData(self.urls['search'] % params)
        # NOTE(review): the loop returns on its first iteration, so only
        # the first feed entry is ever considered -- presumably intended,
        # since results are relevance-sorted; verify against callers.
        for nzb in nzbs:
            nzbclub_id = tryInt(self.getTextElement(nzb, "link").split('/nzb_view/')[1].split('/')[0])
            enclosure = self.getElement(nzb, "enclosure").attrib
            size = enclosure['length']
            date = self.getTextElement(nzb, "pubDate")
            # Dead code kept from upstream: never called and its returns
            # are commented out.
            def extra_check(item):
                full_description = self.getCache('nzbclub.%s' % nzbclub_id, item['detail_url'], cache_timeout = 25920000)
                for ignored in ['ARCHIVE inside ARCHIVE', 'Incomplete', 'repair impossible']:
                    if ignored in full_description:
                        log.info('Wrong: Seems to be passworded or corrupted files: %s', item['name'])
                        # return False
                #return True
            nzbid = nzbclub_id
            #'name': toUnicode(self.getTextElement(nzb, "title")),
            age = self.calculateAge(int(time.mktime(parse(date).timetuple())))
            sizeInMegs = (tryInt(size)/1024/1024)
            # Spaces in the enclosure URL break downloaders
            downloadUrl = enclosure['url'].replace(' ', '_')
            nzbClubURL = self.getTextElement(nzb, "link")
            #'get_more_info': self.getMoreInfo,
            #'extra_check': extra_check
            return NZBGetURLSearchResult( self, downloadUrl, sizeInMegs, nzbClubURL, age, nzbid)
|
pacificIT/linux-udoo | refs/heads/4.0.8 | arch/ia64/scripts/unwcheck.py | 13143 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
# NOTE(review): Python 2 script (print statements, long/0L literals);
# it will not run under Python 3 without conversion.
if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)
readelf = os.getenv("READELF", "readelf")
# Matches readelf -u region headers: "<func>: [0xstart-0xend]"
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
    # Verify that the sum of unwind region lengths equals the number of
    # instruction slots in the function; count mismatches globally.
    if slots != rlen_sum:
        global num_errors
        num_errors += 1
        # Anonymous function: label it by its address range instead
        if not func: func = "[%#x-%#x]" % (start, end)
        print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
    return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
# Parse the unwind sections emitted by "readelf -u"
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        # New function header: first validate the previous function
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        start = long(m.group(2), 16)
        end = long(m.group(3), 16)
        # ia64 bundles hold 3 instruction slots per 16 bytes
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# Don't forget the last function in the file
check_func(func, slots, rlen_sum)
if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err="errors"
    else:
        err="error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    sys.exit(1)
|
BrianGallew/cassandra_range_repair | refs/heads/master | tests/test_ranges.py | 1 | #! /usr/bin/env python
from __future__ import print_function
import os, sys, unittest, pkg_resources, mock, logging
sys.path.insert(0, '..')
sys.path.insert(0, '.')
sys.path.insert(0,os.path.abspath(__file__+"/../../src"))
import range_repair
def fake_init(self, options):
    """Stub replacement for ``TokenContainer.__init__`` used by the tests.

    Records *options* and initialises empty token bookkeeping so a
    TokenContainer can be driven without contacting a live cluster.

    :param options: OptionParser result
    :returns: None
    """
    self.options = options
    self.host_tokens, self.ring_tokens = [], []
    self.host_token_count = -1
class FakeOptions: pass
class range_tests(unittest.TestCase):
    """Unit tests for TokenContainer sub-range generation and formatting,
    covering both Murmur3 and random (MD5) partitioner token spaces."""
    def setUp(self):
        # Monkey-patch the constructor so no cluster access happens
        range_repair.TokenContainer.__init__ = fake_init
        f = FakeOptions()
        f.keyspace='pathdb'
        f.columnfamily='path_claims'
        f.host='db-cdev-1.phx3.llnw.net'
        f.steps=5
        f.nodetool='nodetool'
        f.workers=1
        f.local=''
        f.snapshot=''
        f.verbose=True
        f.debug=True
        self.f = f
        return
    def test_Murmur3_range_start_zero(self):
        # Splitting [0, 3000) in 5 steps yields evenly spaced start tokens
        resultset = []
        t = range_repair.TokenContainer(self.f)
        for x in t.sub_range_generator(0, 3000, steps=5):
            resultset.append(x[0])
        self.assertEqual(resultset, [t.format(0), t.format(600), t.format(1200), t.format(1800), t.format(2400)])
        return
    def test_Murmur3_range_end_zero(self):
        resultset = []
        t = range_repair.TokenContainer(self.f)
        for x in t.sub_range_generator(-3000, 0, steps=5):
            resultset.append(x[0])
        self.assertEqual(resultset, [t.format(-3000), t.format(-2400), t.format(-1800), t.format(-1200), t.format(-600)])
        return
    def test_Murmur3_range_wrap(self):
        # A range that wraps around the top of the Murmur3 token space
        # produces one extra sub-range
        resultset = []
        t = range_repair.TokenContainer(self.f)
        endpoint = (2**63)-30
        for x in t.sub_range_generator(endpoint, -endpoint, steps=6):
            resultset.append(x[0])
        self.assertEqual(len(resultset), 7)
        return
    def test_Random_range_start_zero(self):
        # check_for_MD5_tokens() switches the container to the random
        # partitioner's token space
        resultset = []
        t = range_repair.TokenContainer(self.f)
        t.ring_tokens.append(0)
        t.check_for_MD5_tokens()
        for x in t.sub_range_generator(0, 3000, steps=5):
            resultset.append(x[0])
        self.assertEqual(resultset, [t.format(0), t.format(600), t.format(1200), t.format(1800), t.format(2400)])
        return
    def test_Random_range_end_zero(self):
        resultset = []
        t = range_repair.TokenContainer(self.f)
        t.ring_tokens.append(0)
        t.check_for_MD5_tokens()
        for x in t.sub_range_generator(-3000, 0, steps=5):
            resultset.append(x[0])
        self.assertEqual(resultset, [t.format(-3000), t.format(-2400), t.format(-1800), t.format(-1200), t.format(-600)])
        return
    def test_Random_range_wrap(self):
        # Unlike Murmur3, the MD5 space wrap yields no extra sub-range here
        resultset = []
        t = range_repair.TokenContainer(self.f)
        endpoint = (2**63)-30
        t.ring_tokens.append(0)
        t.check_for_MD5_tokens()
        for x in t.sub_range_generator(endpoint, -endpoint, steps=6):
            resultset.append(x[0])
        self.assertEqual(len(resultset), 6)
        return
    def test_Murmur3_format_length(self):
        # Murmur3 tokens format to a fixed-width 21-character string
        t = range_repair.TokenContainer(self.f)
        self.assertEqual(21, len(t.format(0)))
        self.assertEqual(21, len(t.format(100)))
        self.assertEqual(21, len(t.format(-100)))
|
anbangleo/NlsdeWeb | refs/heads/master | Python-3.6.0/Lib/lib2to3/pytree.py | 30 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""
Python parse tree definitions.
This is a very concrete parse tree; we need to keep every token and
even the comments and whitespace between tokens.
There's also a pattern matching implementation here.
"""
__author__ = "Guido van Rossum <guido@python.org>"
import sys
import warnings
from io import StringIO
HUGE = 0x7FFFFFFF # maximum repeat count, default max
_type_reprs = {}
def type_repr(type_num):
    """Return the grammar symbol name for *type_num*, or the number itself
    if no symbol is known for it."""
    global _type_reprs
    if not _type_reprs:
        # Build the reverse mapping lazily on first use; importing here
        # avoids a circular import with .pygram at module load time.
        from .pygram import python_symbols
        # printing tokens is possible but not as useful
        # from .pgen2 import token // token.__dict__.items():
        _type_reprs = {val: name
                       for name, val in python_symbols.__dict__.items()
                       if type(val) == int}
    return _type_reprs.setdefault(type_num, type_num)
class Base(object):
    """
    Abstract base class for Node and Leaf.
    This provides some default functionality and boilerplate using the
    template pattern.
    A node may be a subnode of at most one parent.
    """
    # Default values for instance variables
    type = None # int: token number (< 256) or symbol number (>= 256)
    parent = None # Parent node pointer, or None
    children = () # Tuple of subnodes
    was_changed = False  # True once changed() has propagated to this node
    was_checked = False
    def __new__(cls, *args, **kwds):
        """Constructor that prevents Base from being instantiated."""
        assert cls is not Base, "Cannot instantiate Base"
        return object.__new__(cls)
    def __eq__(self, other):
        """
        Compare two nodes for equality.
        This calls the method _eq().
        """
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self._eq(other)
    __hash__ = None # For Py3 compatibility.
    def _eq(self, other):
        """
        Compare two nodes for equality.
        This is called by __eq__ and __ne__. It is only called if the two nodes
        have the same type. This must be implemented by the concrete subclass.
        Nodes should be considered equal if they have the same structure,
        ignoring the prefix string and other context information.
        """
        raise NotImplementedError
    def clone(self):
        """
        Return a cloned (deep) copy of self.
        This must be implemented by the concrete subclass.
        """
        raise NotImplementedError
    def post_order(self):
        """
        Return a post-order iterator for the tree.
        This must be implemented by the concrete subclass.
        """
        raise NotImplementedError
    def pre_order(self):
        """
        Return a pre-order iterator for the tree.
        This must be implemented by the concrete subclass.
        """
        raise NotImplementedError
    def replace(self, new):
        """Replace this node with a new one in the parent."""
        assert self.parent is not None, str(self)
        assert new is not None
        if not isinstance(new, list):
            new = [new]
        l_children = []
        found = False
        # Rebuild the parent's child list, splicing `new` in at our slot;
        # identity comparison is required since nodes compare by structure.
        for ch in self.parent.children:
            if ch is self:
                assert not found, (self.parent.children, self, new)
                if new is not None:
                    l_children.extend(new)
                found = True
            else:
                l_children.append(ch)
        assert found, (self.children, self, new)
        self.parent.changed()
        self.parent.children = l_children
        for x in new:
            x.parent = self.parent
        self.parent = None
    def get_lineno(self):
        """Return the line number which generated the invocant node."""
        # Descend to the leftmost leaf, which carries the line number
        node = self
        while not isinstance(node, Leaf):
            if not node.children:
                return
            node = node.children[0]
        return node.lineno
    def changed(self):
        """Mark this node and all its ancestors as changed."""
        if self.parent:
            self.parent.changed()
        self.was_changed = True
    def remove(self):
        """
        Remove the node from the tree. Returns the position of the node in its
        parent's children before it was removed.
        """
        if self.parent:
            for i, node in enumerate(self.parent.children):
                if node is self:
                    self.parent.changed()
                    del self.parent.children[i]
                    self.parent = None
                    return i
    @property
    def next_sibling(self):
        """
        The node immediately following the invocant in their parent's children
        list. If the invocant does not have a next sibling, it is None
        """
        if self.parent is None:
            return None
        # Can't use index(); we need to test by identity
        for i, child in enumerate(self.parent.children):
            if child is self:
                try:
                    return self.parent.children[i+1]
                except IndexError:
                    return None
    @property
    def prev_sibling(self):
        """
        The node immediately preceding the invocant in their parent's children
        list. If the invocant does not have a previous sibling, it is None.
        """
        if self.parent is None:
            return None
        # Can't use index(); we need to test by identity
        for i, child in enumerate(self.parent.children):
            if child is self:
                if i == 0:
                    return None
                return self.parent.children[i-1]
    def leaves(self):
        """Yield all leaf nodes of the subtree, left to right."""
        for child in self.children:
            yield from child.leaves()
    def depth(self):
        """Return the distance from this node to the root of the tree."""
        if self.parent is None:
            return 0
        return 1 + self.parent.depth()
    def get_suffix(self):
        """
        Return the string immediately following the invocant node. This is
        effectively equivalent to node.next_sibling.prefix
        """
        next_sib = self.next_sibling
        if next_sib is None:
            return ""
        return next_sib.prefix
    # Python 2 only: encode the unicode representation for str()
    if sys.version_info < (3, 0):
        def __str__(self):
            return str(self).encode("ascii")
class Node(Base):
    """Concrete implementation for interior nodes."""
    def __init__(self,type, children,
                 context=None,
                 prefix=None,
                 fixers_applied=None):
        """
        Initializer.
        Takes a type constant (a symbol number >= 256), a sequence of
        child nodes, and an optional context keyword argument.
        As a side effect, the parent pointers of the children are updated.
        """
        assert type >= 256, type
        self.type = type
        self.children = list(children)
        for ch in self.children:
            assert ch.parent is None, repr(ch)
            ch.parent = self
        if prefix is not None:
            self.prefix = prefix
        if fixers_applied:
            # Copy so later mutation of the caller's list doesn't leak in
            self.fixers_applied = fixers_applied[:]
        else:
            self.fixers_applied = None
    def __repr__(self):
        """Return a canonical string representation."""
        return "%s(%s, %r)" % (self.__class__.__name__,
                               type_repr(self.type),
                               self.children)
    def __unicode__(self):
        """
        Return a pretty string representation.
        This reproduces the input source exactly.
        """
        return "".join(map(str, self.children))
    if sys.version_info > (3, 0):
        __str__ = __unicode__
    def _eq(self, other):
        """Compare two nodes for equality."""
        return (self.type, self.children) == (other.type, other.children)
    def clone(self):
        """Return a cloned (deep) copy of self."""
        return Node(self.type, [ch.clone() for ch in self.children],
                    fixers_applied=self.fixers_applied)
    def post_order(self):
        """Return a post-order iterator for the tree."""
        for child in self.children:
            yield from child.post_order()
        yield self
    def pre_order(self):
        """Return a pre-order iterator for the tree."""
        yield self
        for child in self.children:
            yield from child.pre_order()
    def _prefix_getter(self):
        """
        The whitespace and comments preceding this node in the input.
        """
        # An interior node's prefix is its first leaf's prefix
        if not self.children:
            return ""
        return self.children[0].prefix
    def _prefix_setter(self, prefix):
        if self.children:
            self.children[0].prefix = prefix
    prefix = property(_prefix_getter, _prefix_setter)
    def set_child(self, i, child):
        """
        Equivalent to 'node.children[i] = child'. This method also sets the
        child's parent attribute appropriately.
        """
        child.parent = self
        self.children[i].parent = None
        self.children[i] = child
        self.changed()
    def insert_child(self, i, child):
        """
        Equivalent to 'node.children.insert(i, child)'. This method also sets
        the child's parent attribute appropriately.
        """
        child.parent = self
        self.children.insert(i, child)
        self.changed()
    def append_child(self, child):
        """
        Equivalent to 'node.children.append(child)'. This method also sets the
        child's parent attribute appropriately.
        """
        child.parent = self
        self.children.append(child)
        self.changed()
class Leaf(Base):
    """Concrete implementation for leaf nodes."""
    # Default values for instance variables
    _prefix = "" # Whitespace and comments preceding this token in the input
    lineno = 0 # Line where this token starts in the input
    column = 0 # Column where this token starts in the input
    def __init__(self, type, value,
                 context=None,
                 prefix=None,
                 fixers_applied=[]):
        """
        Initializer.
        Takes a type constant (a token number < 256), a string value, and an
        optional context keyword argument.
        """
        assert 0 <= type < 256, type
        if context is not None:
            # context is (prefix, (lineno, column))
            self._prefix, (self.lineno, self.column) = context
        self.type = type
        self.value = value
        if prefix is not None:
            self._prefix = prefix
        # Copy the (shared mutable default) list to keep instances isolated
        self.fixers_applied = fixers_applied[:]
    def __repr__(self):
        """Return a canonical string representation."""
        return "%s(%r, %r)" % (self.__class__.__name__,
                               self.type,
                               self.value)
    def __unicode__(self):
        """
        Return a pretty string representation.
        This reproduces the input source exactly.
        """
        return self.prefix + str(self.value)
    if sys.version_info > (3, 0):
        __str__ = __unicode__
    def _eq(self, other):
        """Compare two nodes for equality."""
        return (self.type, self.value) == (other.type, other.value)
    def clone(self):
        """Return a cloned (deep) copy of self."""
        return Leaf(self.type, self.value,
                    (self.prefix, (self.lineno, self.column)),
                    fixers_applied=self.fixers_applied)
    def leaves(self):
        """Yield self: a leaf is its own only leaf."""
        yield self
    def post_order(self):
        """Return a post-order iterator for the tree."""
        yield self
    def pre_order(self):
        """Return a pre-order iterator for the tree."""
        yield self
    def _prefix_getter(self):
        """
        The whitespace and comments preceding this token in the input.
        """
        return self._prefix
    def _prefix_setter(self, prefix):
        self.changed()
        self._prefix = prefix
    prefix = property(_prefix_getter, _prefix_setter)
def convert(gr, raw_node):
    """
    Convert raw node information to a Node or Leaf instance.
    This is passed to the parser driver which calls it whenever a reduction of a
    grammar rule produces a new complete node, so that the tree is build
    strictly bottom-up.

    :param gr: the grammar object (used to tell symbols from tokens)
    :param raw_node: a (type, value, context, children) tuple
    """
    type, value, context, children = raw_node
    if children or type in gr.number2symbol:
        # If there's exactly one child, return that child instead of
        # creating a new node.
        if len(children) == 1:
            return children[0]
        return Node(type, children, context=context)
    else:
        return Leaf(type, value, context=context)
class BasePattern(object):
    """
    A pattern is a tree matching pattern.
    It looks for a specific node type (token or symbol), and
    optionally for a specific content.
    This is an abstract base class. There are three concrete
    subclasses:
    - LeafPattern matches a single leaf node;
    - NodePattern matches a single node (usually non-leaf);
    - WildcardPattern matches a sequence of nodes of variable length.
    """
    # Defaults for instance variables
    type = None # Node type (token if < 256, symbol if >= 256)
    content = None # Optional content matching pattern
    name = None # Optional name used to store match in results dict
    def __new__(cls, *args, **kwds):
        """Constructor that prevents BasePattern from being instantiated."""
        assert cls is not BasePattern, "Cannot instantiate BasePattern"
        return object.__new__(cls)
    def __repr__(self):
        # Drop trailing None args for a compact canonical form
        args = [type_repr(self.type), self.content, self.name]
        while args and args[-1] is None:
            del args[-1]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args)))
    def optimize(self):
        """
        A subclass can define this as a hook for optimizations.
        Returns either self or another node with the same effect.
        """
        return self
    def match(self, node, results=None):
        """
        Does this pattern exactly match a node?
        Returns True if it matches, False if not.
        If results is not None, it must be a dict which will be
        updated with the nodes matching named subpatterns.
        Default implementation for non-wildcard patterns.
        """
        if self.type is not None and node.type != self.type:
            return False
        if self.content is not None:
            r = None
            if results is not None:
                r = {}
            if not self._submatch(node, r):
                return False
            if r:
                results.update(r)
        if results is not None and self.name:
            results[self.name] = node
        return True
    def match_seq(self, nodes, results=None):
        """
        Does this pattern exactly match a sequence of nodes?
        Default implementation for non-wildcard patterns.
        """
        if len(nodes) != 1:
            return False
        return self.match(nodes[0], results)
    def generate_matches(self, nodes):
        """
        Generator yielding all matches for this pattern.
        Default implementation for non-wildcard patterns.
        """
        r = {}
        if nodes and self.match(nodes[0], r):
            yield 1, r
class LeafPattern(BasePattern):
    """Pattern that matches a single Leaf node, optionally by value."""
    def __init__(self, type=None, content=None, name=None):
        """
        Initializer. Takes optional type, content, and name.
        The type, if given must be a token type (< 256). If not given,
        this matches any *leaf* node; the content may still be required.
        The content, if given, must be a string.
        If a name is given, the matching node is stored in the results
        dict under that key.
        """
        if type is not None:
            assert 0 <= type < 256, type
        if content is not None:
            assert isinstance(content, str), repr(content)
        self.type = type
        self.content = content
        self.name = name
    def match(self, node, results=None):
        """Override match() to insist on a leaf node."""
        if not isinstance(node, Leaf):
            return False
        return BasePattern.match(self, node, results)
    def _submatch(self, node, results=None):
        """
        Match the pattern's content to the node's children.
        This assumes the node type matches and self.content is not None.
        Returns True if it matches, False if not.
        If results is not None, it must be a dict which will be
        updated with the nodes matching named subpatterns.
        When returning False, the results dict may still be updated.
        """
        # For a leaf, "content" is simply the token's string value
        return self.content == node.value
class NodePattern(BasePattern):
    """Pattern that matches a single (usually interior) node, optionally
    matching its children against a sequence of subpatterns."""
    # True if any subpattern in content is a WildcardPattern
    wildcards = False
    def __init__(self, type=None, content=None, name=None):
        """
        Initializer. Takes optional type, content, and name.
        The type, if given, must be a symbol type (>= 256). If the
        type is None this matches *any* single node (leaf or not),
        except if content is not None, in which it only matches
        non-leaf nodes that also match the content pattern.
        The content, if not None, must be a sequence of Patterns that
        must match the node's children exactly. If the content is
        given, the type must not be None.
        If a name is given, the matching node is stored in the results
        dict under that key.
        """
        if type is not None:
            assert type >= 256, type
        if content is not None:
            assert not isinstance(content, str), repr(content)
            content = list(content)
            for i, item in enumerate(content):
                assert isinstance(item, BasePattern), (i, item)
                if isinstance(item, WildcardPattern):
                    self.wildcards = True
        self.type = type
        self.content = content
        self.name = name
    def _submatch(self, node, results=None):
        """
        Match the pattern's content to the node's children.
        This assumes the node type matches and self.content is not None.
        Returns True if it matches, False if not.
        If results is not None, it must be a dict which will be
        updated with the nodes matching named subpatterns.
        When returning False, the results dict may still be updated.
        """
        if self.wildcards:
            # Wildcards require the full backtracking matcher; accept only
            # matches that consume all children
            for c, r in generate_matches(self.content, node.children):
                if c == len(node.children):
                    if results is not None:
                        results.update(r)
                    return True
            return False
        if len(self.content) != len(node.children):
            return False
        for subpattern, child in zip(self.content, node.children):
            if not subpattern.match(child, results):
                return False
        return True
class WildcardPattern(BasePattern):
"""
A wildcard pattern can match zero or more nodes.
This has all the flexibility needed to implement patterns like:
.* .+ .? .{m,n}
(a b c | d e | f)
(...)* (...)+ (...)? (...){m,n}
except it always uses non-greedy matching.
"""
def __init__(self, content=None, min=0, max=HUGE, name=None):
"""
Initializer.
Args:
content: optional sequence of subsequences of patterns;
if absent, matches one node;
if present, each subsequence is an alternative [*]
min: optional minimum number of times to match, default 0
max: optional maximum number of times to match, default HUGE
name: optional name assigned to this match
[*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is
equivalent to (a b c | d e | f g h); if content is None,
this is equivalent to '.' in regular expression terms.
The min and max parameters work as follows:
min=0, max=maxint: .*
min=1, max=maxint: .+
min=0, max=1: .?
min=1, max=1: .
If content is not None, replace the dot with the parenthesized
list of alternatives, e.g. (a b c | d e | f g h)*
"""
assert 0 <= min <= max <= HUGE, (min, max)
if content is not None:
content = tuple(map(tuple, content)) # Protect against alterations
# Check sanity of alternatives
assert len(content), repr(content) # Can't have zero alternatives
for alt in content:
assert len(alt), repr(alt) # Can have empty alternatives
self.content = content
self.min = min
self.max = max
self.name = name
def optimize(self):
"""Optimize certain stacked wildcard patterns."""
subpattern = None
if (self.content is not None and
len(self.content) == 1 and len(self.content[0]) == 1):
subpattern = self.content[0][0]
if self.min == 1 and self.max == 1:
if self.content is None:
return NodePattern(name=self.name)
if subpattern is not None and self.name == subpattern.name:
return subpattern.optimize()
if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and
subpattern.min <= 1 and self.name == subpattern.name):
return WildcardPattern(subpattern.content,
self.min*subpattern.min,
self.max*subpattern.max,
subpattern.name)
return self
def match(self, node, results=None):
"""Does this pattern exactly match a node?"""
return self.match_seq([node], results)
def match_seq(self, nodes, results=None):
"""Does this pattern exactly match a sequence of nodes?"""
for c, r in self.generate_matches(nodes):
if c == len(nodes):
if results is not None:
results.update(r)
if self.name:
results[self.name] = list(nodes)
return True
return False
def generate_matches(self, nodes):
"""
Generator yielding matches for a sequence of nodes.
Args:
nodes: sequence of nodes
Yields:
(count, results) tuples where:
count: the match comprises nodes[:count];
results: dict containing named submatches.
"""
if self.content is None:
# Shortcut for special case (see __init__.__doc__)
for count in range(self.min, 1 + min(len(nodes), self.max)):
r = {}
if self.name:
r[self.name] = nodes[:count]
yield count, r
elif self.name == "bare_name":
yield self._bare_name_matches(nodes)
else:
# The reason for this is that hitting the recursion limit usually
# results in some ugly messages about how RuntimeErrors are being
# ignored. We only have to do this on CPython, though, because other
# implementations don't have this nasty bug in the first place.
if hasattr(sys, "getrefcount"):
save_stderr = sys.stderr
sys.stderr = StringIO()
try:
for count, r in self._recursive_matches(nodes, 0):
if self.name:
r[self.name] = nodes[:count]
yield count, r
except RuntimeError:
# We fall back to the iterative pattern matching scheme if the recursive
# scheme hits the recursion limit.
for count, r in self._iterative_matches(nodes):
if self.name:
r[self.name] = nodes[:count]
yield count, r
finally:
if hasattr(sys, "getrefcount"):
sys.stderr = save_stderr
    def _iterative_matches(self, nodes):
        """Helper to iteratively yield the matches.

        Breadth-first expansion: seed with every one-alternative match,
        then repeatedly extend each partial match by one more alternative
        until no extension is possible.  Yields the same (count, results)
        tuples as _recursive_matches, without recursion.
        """
        nodelen = len(nodes)
        if 0 >= self.min:
            # Zero repetitions is itself a valid (empty) match.
            yield 0, {}
        results = []
        # generate matches that use just one alt from self.content
        for alt in self.content:
            for c, r in generate_matches(alt, nodes):
                yield c, r
                results.append((c, r))
        # for each match, iterate down the nodes
        while results:
            new_results = []
            for c0, r0 in results:
                # stop if the entire set of nodes has been matched
                if c0 < nodelen and c0 <= self.max:
                    for alt in self.content:
                        for c1, r1 in generate_matches(alt, nodes[c0:]):
                            if c1 > 0:
                                # Merge submatches from the prefix and the extension.
                                r = {}
                                r.update(r0)
                                r.update(r1)
                                yield c0 + c1, r
                                new_results.append((c0 + c1, r))
            results = new_results
def _bare_name_matches(self, nodes):
"""Special optimized matcher for bare_name."""
count = 0
r = {}
done = False
max = len(nodes)
while not done and count < max:
done = True
for leaf in self.content:
if leaf[0].match(nodes[count], r):
count += 1
done = False
break
r[self.name] = nodes[:count]
return count, r
    def _recursive_matches(self, nodes, count):
        """Helper to recursively yield the matches.

        *count* is the number of repetitions already consumed by callers
        up the recursion.  Yields (c, results) where c nodes are matched
        by one more alternative followed by further repetitions, plus the
        empty match once the minimum repetition count has been reached.
        """
        assert self.content is not None
        if count >= self.min:
            # The minimum is satisfied; stopping here is a valid match.
            yield 0, {}
        if count < self.max:
            for alt in self.content:
                for c0, r0 in generate_matches(alt, nodes):
                    for c1, r1 in self._recursive_matches(nodes[c0:], count+1):
                        # Merge the submatches of this alternative and the rest.
                        r = {}
                        r.update(r0)
                        r.update(r1)
                        yield c0 + c1, r
class NegatedPattern(BasePattern):
    def __init__(self, content=None):
        """
        Initializer.

        The argument is either a pattern or None.  If it is None, this
        only matches an empty sequence (effectively '$' in regex
        lingo).  If it is not None, this matches whenever the argument
        pattern doesn't have any matches.
        """
        if content is not None:
            assert isinstance(content, BasePattern), repr(content)
        self.content = content
    def match(self, node):
        # We never match a node in its entirety
        return False
    def match_seq(self, nodes):
        # We only match an empty sequence of nodes in its entirety
        return len(nodes) == 0
    def generate_matches(self, nodes):
        """Yield a single empty match when the negation holds, else nothing."""
        if self.content is None:
            # Return a match if there is an empty sequence
            if len(nodes) == 0:
                yield 0, {}
        else:
            # Return a match if the argument pattern has no matches:
            # the first generated match aborts via `return`, so the
            # trailing yield runs only when the loop body never ran.
            for c, r in self.content.generate_matches(nodes):
                return
            yield 0, {}
def generate_matches(patterns, nodes):
    """
    Generator yielding matches for a sequence of patterns and nodes.

    Args:
        patterns: a sequence of patterns
        nodes: a sequence of nodes

    Yields:
        (count, results) tuples where:
        count: the entire sequence of patterns matches nodes[:count];
        results: dict containing named submatches.
    """
    # An empty pattern sequence matches exactly zero nodes.
    if not patterns:
        yield 0, {}
        return
    first, remaining = patterns[0], patterns[1:]
    for head_count, head_results in first.generate_matches(nodes):
        if not remaining:
            yield head_count, head_results
            continue
        # Match the leftover patterns against the leftover nodes and
        # merge the named submatches from both halves.
        for tail_count, tail_results in generate_matches(remaining,
                                                         nodes[head_count:]):
            merged = dict(head_results)
            merged.update(tail_results)
            yield head_count + tail_count, merged
|
andyfaff/scipy | refs/heads/master | scipy/stats/kde.py | 7 | #-------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Written by: Robert Kern
#
# Date: 2004-08-09
#
# Modified: 2005-02-10 by Robert Kern.
# Contributed to SciPy
# 2005-10-07 by Robert Kern.
# Some fixes to match the new scipy_core
#
# Copyright 2004-2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
# Standard library imports.
import warnings
# SciPy imports.
from scipy import linalg, special
from scipy.special import logsumexp
from scipy._lib._util import check_random_state
from numpy import (asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi,
sqrt, ravel, power, atleast_1d, squeeze, sum, transpose,
ones, cov)
import numpy as np
# Local imports.
from . import mvn
from ._stats import gaussian_kernel_estimate
__all__ = ['gaussian_kde']
class gaussian_kde:
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
weights : array_like, optional
weights of datapoints. This must be the same shape as dataset.
If None (default), the samples are assumed to be equally weighted
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
neff : int
Effective number of datapoints.
.. versionadded:: 1.2.0
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
evaluate
__call__
integrate_gaussian
integrate_box_1d
integrate_box
integrate_kde
pdf
logpdf
resample
set_bandwidth
covariance_factor
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
In the case of unequally weighted points, `scotts_factor` becomes::
neff**(-1./(d+4)),
with ``neff`` the effective number of datapoints.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
or in the case of unequally weighted points::
(neff * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
With a set of weighted samples, the effective number of datapoints ``neff``
is defined by::
neff = sum(weights)^2 / sum(weights^2)
as detailed in [5]_.
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
.. [5] Gray P. G., 1969, Journal of the Royal Statistical Society.
Series A (General), 132, 272
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
>>> def measure(n):
... "Measurement model, return two coupled measurements."
... m1 = np.random.normal(size=n)
... m2 = np.random.normal(scale=0.5, size=n)
... return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
def __init__(self, dataset, bw_method=None, weights=None):
self.dataset = atleast_2d(asarray(dataset))
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
if weights is not None:
self._weights = atleast_1d(weights).astype(float)
self._weights /= sum(self._weights)
if self.weights.ndim != 1:
raise ValueError("`weights` input should be one-dimensional.")
if len(self._weights) != self.n:
raise ValueError("`weights` input should be of length n")
self._neff = 1/sum(self._weights**2)
self.set_bandwidth(bw_method=bw_method)
    def evaluate(self, points):
        """Evaluate the estimated pdf on a set of points.

        Parameters
        ----------
        points : (# of dimensions, # of points)-array
            Alternatively, a (# of dimensions,) vector can be passed in and
            treated as a single point.

        Returns
        -------
        values : (# of points,)-array
            The values at each point.

        Raises
        ------
        ValueError : if the dimensionality of the input points is different than
                     the dimensionality of the KDE.
        """
        points = atleast_2d(asarray(points))
        d, m = points.shape
        if d != self.d:
            if d == 1 and m == self.d:
                # points was passed in as a row vector
                points = reshape(points, (self.d, 1))
                m = 1
            else:
                msg = "points have dimension %s, dataset has dimension %s" % (d,
                    self.d)
                raise ValueError(msg)
        # Select the float width of the compiled kernel from the common
        # dtype of the bandwidth matrix and the evaluation points.
        output_dtype = np.common_type(self.covariance, points)
        itemsize = np.dtype(output_dtype).itemsize
        if itemsize == 4:
            spec = 'float'
        elif itemsize == 8:
            spec = 'double'
        elif itemsize in (12, 16):
            spec = 'long double'
        else:
            raise TypeError('%s has unexpected item size %d' %
                            (output_dtype, itemsize))
        # The Cython helper returns an (m, 1) column; flatten to (m,).
        result = gaussian_kernel_estimate[spec](self.dataset.T, self.weights[:, None],
                                                points.T, self.inv_cov, output_dtype)
        return result[:, 0]
    # Calling the instance evaluates the pdf.
    __call__ = evaluate
    def integrate_gaussian(self, mean, cov):
        """
        Multiply estimated density by a multivariate Gaussian and integrate
        over the whole space.

        Parameters
        ----------
        mean : array_like
            A 1-D array, specifying the mean of the Gaussian.
        cov : array_like
            A 2-D array, specifying the covariance matrix of the Gaussian.

        Returns
        -------
        result : scalar
            The value of the integral.

        Raises
        ------
        ValueError
            If the mean or covariance of the input Gaussian differs from
            the KDE's dimensionality.
        """
        mean = atleast_1d(squeeze(mean))
        cov = atleast_2d(cov)
        if mean.shape != (self.d,):
            raise ValueError("mean does not have dimension %s" % self.d)
        if cov.shape != (self.d, self.d):
            raise ValueError("covariance does not have dimension %s" % self.d)
        # make mean a column vector
        mean = mean[:, newaxis]
        # The product of two Gaussians integrates to a Gaussian with the
        # summed covariances, evaluated at the difference of the means.
        sum_cov = self.covariance + cov
        # This will raise LinAlgError if the new cov matrix is not s.p.d
        # cho_factor returns (ndarray, bool) where bool is a flag for whether
        # or not ndarray is upper or lower triangular
        sum_cov_chol = linalg.cho_factor(sum_cov)
        diff = self.dataset - mean
        tdiff = linalg.cho_solve(sum_cov_chol, diff)
        # sqrt(det(sum_cov)) from the diagonal of the Cholesky factor.
        sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
        norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
        # Quadratic form (x_i - mean)^T sum_cov^{-1} (x_i - mean) / 2.
        energies = sum(diff * tdiff, axis=0) / 2.0
        result = sum(exp(-energies)*self.weights, axis=0) / norm_const
        return result
def integrate_box_1d(self, low, high):
"""
Computes the integral of a 1D pdf between two bounds.
Parameters
----------
low : scalar
Lower bound of integration.
high : scalar
Upper bound of integration.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDE is over more than one dimension.
"""
if self.d != 1:
raise ValueError("integrate_box_1d() only handles 1D pdfs")
stdev = ravel(sqrt(self.covariance))[0]
normalized_low = ravel((low - self.dataset) / stdev)
normalized_high = ravel((high - self.dataset) / stdev)
value = np.sum(self.weights*(
special.ndtr(normalized_high) -
special.ndtr(normalized_low)))
return value
    def integrate_box(self, low_bounds, high_bounds, maxpts=None):
        """Computes the integral of a pdf over a rectangular interval.

        Parameters
        ----------
        low_bounds : array_like
            A 1-D array containing the lower bounds of integration.
        high_bounds : array_like
            A 1-D array containing the upper bounds of integration.
        maxpts : int, optional
            The maximum number of points to use for integration.

        Returns
        -------
        value : scalar
            The result of the integral.
        """
        if maxpts is not None:
            extra_kwds = {'maxpts': maxpts}
        else:
            extra_kwds = {}
        # Delegate to the compiled `mvn` helper, which integrates the
        # weighted mixture of Gaussians over the box.
        value, inform = mvn.mvnun_weighted(low_bounds, high_bounds,
                                           self.dataset, self.weights,
                                           self.covariance, **extra_kwds)
        if inform:
            # Nonzero `inform` means the integrator hit its point budget;
            # the value is returned anyway, with a warning.
            msg = ('An integral in mvn.mvnun requires more points than %s' %
                   (self.d * 1000))
            warnings.warn(msg)
        return value
    def integrate_kde(self, other):
        """
        Computes the integral of the product of this kernel density estimate
        with another.

        Parameters
        ----------
        other : gaussian_kde instance
            The other kde.

        Returns
        -------
        value : scalar
            The result of the integral.

        Raises
        ------
        ValueError
            If the KDEs have different dimensionality.
        """
        if other.d != self.d:
            raise ValueError("KDEs are not the same dimensionality")
        # we want to iterate over the smallest number of points
        if other.n < self.n:
            small = other
            large = self
        else:
            small = self
            large = other
        # Product of two Gaussian mixtures: each cross term integrates to a
        # Gaussian with the summed kernel covariances.
        sum_cov = small.covariance + large.covariance
        sum_cov_chol = linalg.cho_factor(sum_cov)
        result = 0.0
        for i in range(small.n):
            mean = small.dataset[:, i, newaxis]
            diff = large.dataset - mean
            tdiff = linalg.cho_solve(sum_cov_chol, diff)
            # Quadratic forms against every point of the larger KDE at once.
            energies = sum(diff * tdiff, axis=0) / 2.0
            result += sum(exp(-energies)*large.weights, axis=0)*small.weights[i]
        # Shared normalization constant (2*pi)^(d/2) * sqrt(det(sum_cov)).
        sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
        norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
        result /= norm_const
        return result
    def resample(self, size=None, seed=None):
        """Randomly sample a dataset from the estimated pdf.

        Parameters
        ----------
        size : int, optional
            The number of samples to draw.  If not provided, then the size is
            the same as the effective number of samples in the underlying
            dataset.
        seed : {None, int, `numpy.random.Generator`,
                `numpy.random.RandomState`}, optional
            If `seed` is None (or `np.random`), the `numpy.random.RandomState`
            singleton is used.
            If `seed` is an int, a new ``RandomState`` instance is used,
            seeded with `seed`.
            If `seed` is already a ``Generator`` or ``RandomState`` instance then
            that instance is used.

        Returns
        -------
        resample : (self.d, `size`) ndarray
            The sampled dataset.
        """
        if size is None:
            size = int(self.neff)
        random_state = check_random_state(seed)
        # Kernel noise: zero-mean Gaussian draws with the KDE bandwidth.
        norm = transpose(random_state.multivariate_normal(
            zeros((self.d,), float), self.covariance, size=size
        ))
        # Pick kernel centres with probability proportional to the weights.
        indices = random_state.choice(self.n, size=size, p=self.weights)
        means = self.dataset[:, indices]
        return means + norm
def scotts_factor(self):
"""Compute Scott's factor.
Returns
-------
s : float
Scott's factor.
"""
return power(self.neff, -1./(self.d+4))
def silverman_factor(self):
"""Compute the Silverman factor.
Returns
-------
s : float
The silverman factor.
"""
return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
multiplies the data covariance matrix to obtain the kernel covariance
matrix. The default is `scotts_factor`. A subclass can overwrite this
method to provide a different method, or set it through a call to
`kde.set_bandwidth`."""
    def set_bandwidth(self, bw_method=None):
        """Compute the estimator bandwidth with given method.

        The new bandwidth calculated after a call to `set_bandwidth` is used
        for subsequent evaluations of the estimated density.

        Parameters
        ----------
        bw_method : str, scalar or callable, optional
            The method used to calculate the estimator bandwidth.  This can be
            'scott', 'silverman', a scalar constant or a callable.  If a
            scalar, this will be used directly as `kde.factor`.  If a callable,
            it should take a `gaussian_kde` instance as only parameter and
            return a scalar.  If None (default), nothing happens; the current
            `kde.covariance_factor` method is kept.

        Notes
        -----
        .. versionadded:: 0.11

        Examples
        --------
        >>> import scipy.stats as stats
        >>> x1 = np.array([-7, -5, 1, 4, 5.])
        >>> kde = stats.gaussian_kde(x1)
        >>> xs = np.linspace(-10, 10, num=50)
        >>> y1 = kde(xs)
        >>> kde.set_bandwidth(bw_method='silverman')
        >>> y2 = kde(xs)
        >>> kde.set_bandwidth(bw_method=kde.factor / 3.)
        >>> y3 = kde(xs)
        >>> import matplotlib.pyplot as plt
        >>> fig, ax = plt.subplots()
        >>> ax.plot(x1, np.full(x1.shape, 1 / (4. * x1.size)), 'bo',
        ...         label='Data points (rescaled)')
        >>> ax.plot(xs, y1, label='Scott (default)')
        >>> ax.plot(xs, y2, label='Silverman')
        >>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
        >>> ax.legend()
        >>> plt.show()
        """
        if bw_method is None:
            # Keep whatever covariance_factor is currently installed.
            pass
        elif bw_method == 'scott':
            self.covariance_factor = self.scotts_factor
        elif bw_method == 'silverman':
            self.covariance_factor = self.silverman_factor
        elif np.isscalar(bw_method) and not isinstance(bw_method, str):
            # A bare number is used directly as the bandwidth factor.
            self._bw_method = 'use constant'
            self.covariance_factor = lambda: bw_method
        elif callable(bw_method):
            self._bw_method = bw_method
            self.covariance_factor = lambda: self._bw_method(self)
        else:
            msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
                  "or a callable."
            raise ValueError(msg)
        # Recompute factor, covariance and inv_cov with the new method.
        self._compute_covariance()
    def _compute_covariance(self):
        """Computes the covariance matrix for each Gaussian kernel using
        covariance_factor().
        """
        self.factor = self.covariance_factor()
        # Cache covariance and inverse covariance of the data: the data
        # (co)variance never changes, only the bandwidth factor does, so
        # the expensive cov/inv pair is computed once and rescaled later.
        if not hasattr(self, '_data_inv_cov'):
            self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,
                                               bias=False,
                                               aweights=self.weights))
            self._data_inv_cov = linalg.inv(self._data_covariance)
        self.covariance = self._data_covariance * self.factor**2
        self.inv_cov = self._data_inv_cov / self.factor**2
        # Log-determinant of 2*pi*covariance via Cholesky; used by logpdf().
        L = linalg.cholesky(self.covariance*2*pi)
        self.log_det = 2*np.log(np.diag(L)).sum()
    def pdf(self, x):
        """
        Evaluate the estimated pdf on a provided set of points.

        Notes
        -----
        This is an alias for `gaussian_kde.evaluate`.  See the ``evaluate``
        docstring for more details.
        """
        return self.evaluate(x)
    def logpdf(self, x):
        """
        Evaluate the log of the estimated pdf on a provided set of points.

        Accepts the same point layout as `evaluate` and computes the
        log-density directly with logsumexp for numerical stability,
        rather than taking log(evaluate(x)).
        """
        points = atleast_2d(x)
        d, m = points.shape
        if d != self.d:
            if d == 1 and m == self.d:
                # points was passed in as a row vector
                points = reshape(points, (self.d, 1))
                m = 1
            else:
                msg = "points have dimension %s, dataset has dimension %s" % (d,
                    self.d)
                raise ValueError(msg)
        # Loop over whichever axis is shorter to keep the temporary arrays small.
        if m >= self.n:
            # there are more points than data, so loop over data
            energy = np.empty((self.n, m), dtype=float)
            for i in range(self.n):
                diff = self.dataset[:, i, newaxis] - points
                tdiff = dot(self.inv_cov, diff)
                energy[i] = sum(diff*tdiff, axis=0)
            log_to_sum = 2.0 * np.log(self.weights) - self.log_det - energy.T
            result = logsumexp(0.5 * log_to_sum, axis=1)
        else:
            # loop over points
            result = np.empty((m,), dtype=float)
            for i in range(m):
                diff = self.dataset - points[:, i, newaxis]
                tdiff = dot(self.inv_cov, diff)
                energy = sum(diff * tdiff, axis=0)
                log_to_sum = 2.0 * np.log(self.weights) - self.log_det - energy
                result[i] = logsumexp(0.5 * log_to_sum)
        return result
@property
def weights(self):
try:
return self._weights
except AttributeError:
self._weights = ones(self.n)/self.n
return self._weights
@property
def neff(self):
try:
return self._neff
except AttributeError:
self._neff = 1/sum(self.weights**2)
return self._neff
|
love436053/repos | refs/heads/master | python/wxpython/grid_create.py | 1 | #!/usr/bin/python
# coding:utf-8
import wx
import os
import sys
import wx.grid
class GridWindow(wx.Frame):
    """Demo frame: builds and styles a wx.grid.Grid and wires every grid
    event to a single logging dispatcher."""
    def __init__(self, parent, title):
        '''
        Frame style flags (wx.DEFAULT_FRAME_STYLE would give the full set:
        caption, resizable border, min/max buttons, close button, system
        menu).  This frame deliberately uses only:
            wx.MINIMIZE_BOX : minimize box on the title bar
            wx.CAPTION      : title bar showing the frame's title
            wx.CLOSE_BOX    : close box on the title bar
        so the window cannot be resized or maximized.
        '''
        super(GridWindow, self).__init__(parent = parent,
                                         title = title,
                                         size = (350, 250),
                                         style = wx.MINIMIZE_BOX | wx.CAPTION | wx.CLOSE_BOX,
                                         name = os.path.basename(sys.argv[0]).split('.')[0])
        self.window_init()
        self.event_init()
        self.Centre()
        self.Show()
    def window_init(self):
        '''
        Build the frame layout: a vertical box sizer holding the grid.
        (wx.VERTICAL lays children out top-to-bottom; wx.HORIZONTAL
        left-to-right.)
        '''
        self.vbox = wx.BoxSizer(wx.VERTICAL)
        self.form_init()
        self.vbox.Add((-1, 10))
        self.vbox.Add(self.box_form,
                      flag = wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP,
                      border = 5,
                      proportion = 1)
        self.SetSizer(self.vbox)
    def form_init(self):
        # Create a 5x4 grid, fill the first 4 rows with sample data and
        # style every populated cell (read-only, red on green, centered).
        self.box_form = wx.BoxSizer(wx.VERTICAL)
        grid_data = [
            "liu 20 157171169890 shenzhen".split(),
            "li 30 11111122222 guangdong ".split(),
            "zhang 25 33333344444 huizhou".split(),
            "wang 33 55555566666 shaoguan".split(),
        ]
        self.grid = wx.grid.Grid(self, size = (4, 4))
        self.grid.CreateGrid(5, 4)
        self.grid.SetLabelBackgroundColour('White')
        self.grid.SetCellHighlightColour('Blue')
        self.grid.SetGridLineColour('Red')
        for row in range(4):
            self.grid.SetRowSize(row, 20)  # set row height
            self.grid.SetRowLabelValue(row, unicode('第%d行', 'utf-8') % (row))
            for col in range(4):
                self.grid.SetColSize(col, 50)  # set column width
                self.grid.SetColLabelValue(col, unicode('第%d列', 'utf-8') % (col))
                self.grid.SetCellValue(row, col, grid_data[row][col])
                self.grid.SetReadOnly(row, col)
                self.grid.SetCellTextColour(row, col, 'Red')  # text colour
                self.grid.SetCellBackgroundColour(row, col, 'Green')  # background colour
                self.grid.SetCellAlignment(row, col, 5, 5)
                # print self.grid.GetCellValue(row, col)
                # print self.grid.GetRowLabelValue(row)
        self.grid.AppendCols(1)  # append one column at the end
        self.grid.HideRowLabels()  # hide the row labels
        # self.grid.HideColLabels()
        # self.grid.SetDefaultCellOverflow(False)
        '''
        Selection modes for SetSelectionMode():
            0: select individual cells (default)
            1: select whole rows
            2: select whole columns
        '''
        self.grid.SetSelectionMode(1)
        self.grid.ClearSelection()  # clear any selection
        self.grid.SelectRow(0)  # initially select the first row
        # self.grid.AppendRows(1) # append one row at the end
        # NOTE(review): this is the second AppendCols(1) in this method --
        # looks like a copy-paste duplicate of the call above; confirm that
        # two extra columns are really intended.
        self.grid.AppendCols(1)  # append one column at the end
        # self.grid.InsertRows(2, 1) # insert one row after the second row
        self.box_form.Add(self.grid, flag = wx.EXPAND, proportion = 1)
    def event_init(self):
        '''
        Bind every grid event to the single grid_event() dispatcher:
            EVT_GRID_CELL_CHANGE        : cell value edited
            EVT_GRID_CELL_LEFT_CLICK    : left click in a cell
            EVT_GRID_CELL_LEFT_DCLICK   : left double-click in a cell
            EVT_GRID_CELL_RIGHT_CLICK   : right click in a cell
            EVT_GRID_CELL_RIGHT_DCLICK  : right double-click in a cell
            EVT_GRID_EDITOR_HIDDEN      : cell editor hidden at end of editing
            EVT_GRID_EDITOR_SHOWN       : cell editor shown
            EVT_GRID_LABEL_LEFT_CLICK   : left click on a row/column label
            EVT_GRID_LABEL_LEFT_DCLICK  : left double-click on a label
            EVT_GRID_LABEL_RIGHT_CLICK  : right click on a label
            EVT_GRID_LABEL_RIGHT_DCLICK : right double-click on a label
            EVT_GRID_SELECT_CELL        : focus moved to a new cell
        '''
        self.grid.Bind(wx.grid.EVT_GRID_CELL_CHANGE, self.grid_event)
        self.grid.Bind(wx.grid.EVT_GRID_CELL_LEFT_CLICK, self.grid_event)
        self.grid.Bind(wx.grid.EVT_GRID_CELL_LEFT_DCLICK, self.grid_event)
        self.grid.Bind(wx.grid.EVT_GRID_CELL_RIGHT_CLICK, self.grid_event)
        self.grid.Bind(wx.grid.EVT_GRID_CELL_RIGHT_DCLICK, self.grid_event)
        self.grid.Bind(wx.grid.EVT_GRID_EDITOR_HIDDEN, self.grid_event)
        self.grid.Bind(wx.grid.EVT_GRID_EDITOR_SHOWN, self.grid_event)
        self.grid.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.grid_event)
        self.grid.Bind(wx.grid.EVT_GRID_LABEL_LEFT_DCLICK, self.grid_event)
        self.grid.Bind(wx.grid.EVT_GRID_LABEL_RIGHT_CLICK, self.grid_event)
        self.grid.Bind(wx.grid.EVT_GRID_LABEL_RIGHT_DCLICK, self.grid_event)
        self.grid.Bind(wx.grid.EVT_GRID_SELECT_CELL, self.grid_event)
    def grid_event(self, event):
        # Single dispatcher: map the event type to a short description and
        # delegate the actual logging to event_info().
        #print event.GetId(), event.EventType
        if wx.grid.EVT_GRID_CELL_CHANGE.typeId == event.EventType:
            self.event_info(event, 'cell change')
        elif wx.grid.EVT_GRID_CELL_LEFT_CLICK.typeId == event.EventType:
            self.event_info(event, 'left down')
        elif wx.grid.EVT_GRID_CELL_LEFT_DCLICK.typeId == event.EventType:
            self.event_info(event, 'left double down')
        elif wx.grid.EVT_GRID_CELL_RIGHT_CLICK.typeId == event.EventType:
            self.event_info(event, 'right down')
        elif wx.grid.EVT_GRID_CELL_RIGHT_DCLICK.typeId == event.EventType:
            self.event_info(event, 'right double down')
        elif wx.grid.EVT_GRID_EDITOR_HIDDEN.typeId == event.EventType:
            self.event_info(event, 'hide')
        elif wx.grid.EVT_GRID_EDITOR_SHOWN.typeId == event.EventType:
            self.event_info(event, 'show')
        elif wx.grid.EVT_GRID_LABEL_LEFT_CLICK.typeId == event.EventType:
            self.event_info(event, 'lable left down')
        elif wx.grid.EVT_GRID_LABEL_LEFT_DCLICK.typeId == event.EventType:
            self.event_info(event, 'lable left double down')
        elif wx.grid.EVT_GRID_LABEL_RIGHT_CLICK.typeId == event.EventType:
            self.event_info(event, 'lable right down')
        elif wx.grid.EVT_GRID_LABEL_RIGHT_DCLICK.typeId == event.EventType:
            self.event_info(event, 'lable right double down')
        elif wx.grid.EVT_GRID_SELECT_CELL.typeId == event.EventType:
            self.event_info(event, 'new cell')
        event.Skip()
    def event_info(self, event, msg):
        # Log the event description, the grid dimensions, the event's cell
        # coordinates/value and the current selection.
        print msg, self.grid.GetNumberRows(), self.grid.GetNumberCols()
        print event.GetRow(), event.GetCol(), self.grid.GetCellValue(event.GetRow(), event.GetCol())
        print self.grid.GetSelectedCells(), self.grid.GetSelectedRows(), self.grid.GetSelectedCols()
        #print self.grid.GetCell
        # event.AltDown()
        # event.ControlDown()
        # event.GetCol()
        # event.GetPosition()
        # event.GetRow()
        # event.MetaDown()
        # event.Selecting()
        # event.ShiftDown()
if __name__ == '__main__':
    # Create the wx application, show the demo frame and enter the loop.
    app = wx.App()
    GridWindow(None, title = 'form')
    app.MainLoop()
|
ifduyue/tornado | refs/heads/master | maint/test/redbot/red_test.py | 6 | #!/usr/bin/env python
import logging
from redbot.resource import HttpResource
import redbot.speak as rs
import thor
import threading
from tornado import gen
from tornado.options import parse_command_line
from tornado.testing import AsyncHTTPTestCase
from tornado.web import RequestHandler, Application, asynchronous
import unittest
class HelloHandler(RequestHandler):
    """Returns a fixed plain-text body; baseline case for the REDbot checks."""
    def get(self):
        self.write("Hello world")
class RedirectHandler(RequestHandler):
    """Redirects to *path* with the status from ?status= (default 302)."""
    def get(self, path):
        self.redirect(path, status=int(self.get_argument('status', '302')))
class PostHandler(RequestHandler):
    """Accepts a form POST (expects foo=bar) and answers with a 303 redirect."""
    def post(self):
        assert self.get_argument('foo') == 'bar'
        self.redirect('/hello', status=303)
class ChunkedHandler(RequestHandler):
    """Writes the response in two explicitly flushed pieces, exercising
    chunked transfer encoding."""
    @asynchronous
    @gen.engine
    def get(self):
        self.write('hello ')
        yield gen.Task(self.flush)
        self.write('world')
        yield gen.Task(self.flush)
        self.finish()
class CacheHandler(RequestHandler):
    """Echoes the URL path component and uses it verbatim as the ETag,
    letting the tests drive conditional (If-None-Match) behavior."""
    def get(self, computed_etag):
        self.write(computed_etag)
    def compute_etag(self):
        # The body written in get() *is* the etag requested by the test.
        return self._write_buffer[0]
class TestMixin(object):
def get_handlers(self):
return [
('/hello', HelloHandler),
('/redirect(/.*)', RedirectHandler),
('/post', PostHandler),
('/chunked', ChunkedHandler),
('/cache/(.*)', CacheHandler),
]
def get_app_kwargs(self):
return dict(static_path='.')
def get_allowed_warnings(self):
return [
# We can't set a non-heuristic freshness at the framework level,
# so just ignore this warning
rs.FRESHNESS_HEURISTIC,
# For our small test responses the Content-Encoding header
# wipes out any gains from compression
rs.CONNEG_GZIP_BAD,
]
def get_allowed_errors(self):
return []
def check_url(self, path, method='GET', body=None, headers=None,
expected_status=200, allowed_warnings=None,
allowed_errors=None):
url = self.get_url(path)
red = self.run_redbot(url, method, body, headers)
if not red.response.complete:
if isinstance(red.response.http_error, Exception):
logging.warning((red.response.http_error.desc, vars(red.response.http_error), url))
raise red.response.http_error.res_error
else:
raise Exception("unknown error; incomplete response")
self.assertEqual(int(red.response.status_code), expected_status)
allowed_warnings = (allowed_warnings or []) + self.get_allowed_warnings()
allowed_errors = (allowed_errors or []) + self.get_allowed_errors()
errors = []
warnings = []
for msg in red.response.notes:
if msg.level == 'bad':
logger = logging.error
if not isinstance(msg, tuple(allowed_errors)):
errors.append(msg)
elif msg.level == 'warning':
logger = logging.warning
if not isinstance(msg, tuple(allowed_warnings)):
warnings.append(msg)
elif msg.level in ('good', 'info', 'uri'):
logger = logging.info
else:
raise Exception('unknown level' + msg.level)
logger('%s: %s (%s)', msg.category, msg.show_summary('en'),
msg.__class__.__name__)
logger(msg.show_text('en'))
self.assertEqual(len(warnings) + len(errors), 0,
'Had %d unexpected warnings and %d errors' %
(len(warnings), len(errors)))
def run_redbot(self, url, method, body, headers):
red = HttpResource(url, method=method, req_body=body,
req_hdrs=headers)
def work():
red.run(thor.stop)
thor.run()
self.io_loop.add_callback(self.stop)
thread = threading.Thread(target=work)
thread.start()
self.wait()
thread.join()
return red
def test_hello(self):
self.check_url('/hello')
def test_static(self):
# TODO: 304 responses SHOULD return the same etag that a full
# response would. We currently do for If-None-Match, but not
# for If-Modified-Since (because IMS does not otherwise
# require us to read the file from disk)
self.check_url('/static/red_test.py',
allowed_warnings=[rs.MISSING_HDRS_304])
def test_static_versioned_url(self):
self.check_url('/static/red_test.py?v=1234',
allowed_warnings=[rs.MISSING_HDRS_304])
def test_redirect(self):
self.check_url('/redirect/hello', expected_status=302)
def test_permanent_redirect(self):
self.check_url('/redirect/hello?status=301', expected_status=301)
def test_404(self):
self.check_url('/404', expected_status=404)
def test_post(self):
body = 'foo=bar'
# Without an explicit Content-Length redbot will try to send the
# request chunked.
self.check_url(
'/post', method='POST', body=body,
headers=[('Content-Length', str(len(body))),
('Content-Type', 'application/x-www-form-urlencoded')],
expected_status=303)
def test_chunked(self):
self.check_url('/chunked')
def test_strong_etag_match(self):
computed_etag = '"xyzzy"'
etags = '"xyzzy"'
self.check_url(
'/cache/' + computed_etag, method='GET',
headers=[('If-None-Match', etags)],
expected_status=304)
def test_multiple_strong_etag_match(self):
computed_etag = '"xyzzy1"'
etags = '"xyzzy1", "xyzzy2"'
self.check_url(
'/cache/' + computed_etag, method='GET',
headers=[('If-None-Match', etags)],
expected_status=304)
def test_strong_etag_not_match(self):
computed_etag = '"xyzzy"'
etags = '"xyzzy1"'
self.check_url(
'/cache/' + computed_etag, method='GET',
headers=[('If-None-Match', etags)],
expected_status=200)
def test_multiple_strong_etag_not_match(self):
computed_etag = '"xyzzy"'
etags = '"xyzzy1", "xyzzy2"'
self.check_url(
'/cache/' + computed_etag, method='GET',
headers=[('If-None-Match', etags)],
expected_status=200)
def test_wildcard_etag(self):
computed_etag = '"xyzzy"'
etags = '*'
self.check_url(
'/cache/' + computed_etag, method='GET',
headers=[('If-None-Match', etags)],
expected_status=304,
allowed_warnings=[rs.MISSING_HDRS_304])
def test_weak_etag_match(self):
computed_etag = '"xyzzy1"'
etags = 'W/"xyzzy1"'
self.check_url(
'/cache/' + computed_etag, method='GET',
headers=[('If-None-Match', etags)],
expected_status=304)
def test_multiple_weak_etag_match(self):
computed_etag = '"xyzzy2"'
etags = 'W/"xyzzy1", W/"xyzzy2"'
self.check_url(
'/cache/' + computed_etag, method='GET',
headers=[('If-None-Match', etags)],
expected_status=304)
def test_weak_etag_not_match(self):
    """A non-matching weak validator must produce a full 200 response."""
    served_etag = '"xyzzy2"'
    client_etags = 'W/"xyzzy1"'
    self.check_url('/cache/' + served_etag, method='GET',
                   headers=[('If-None-Match', client_etags)],
                   expected_status=200)
def test_multiple_weak_etag_not_match(self):
    """No weak validator in the list matches, so a 200 is expected."""
    served_etag = '"xyzzy3"'
    client_etags = 'W/"xyzzy1", W/"xyzzy2"'
    self.check_url('/cache/' + served_etag, method='GET',
                   headers=[('If-None-Match', client_etags)],
                   expected_status=200)
class DefaultHTTPTest(AsyncHTTPTestCase, TestMixin):
    """Runs the shared redbot checks against a plain (non-gzip) Application."""

    def get_app(self):
        handlers = self.get_handlers()
        return Application(handlers, **self.get_app_kwargs())
class GzipHTTPTest(AsyncHTTPTestCase, TestMixin):
    """Runs the shared redbot checks with gzip output encoding enabled."""

    def get_app(self):
        kwargs = self.get_app_kwargs()
        return Application(self.get_handlers(), gzip=True, **kwargs)

    def get_allowed_errors(self):
        allowed = super(GzipHTTPTest, self).get_allowed_errors()
        # TODO: The ETag is supposed to change when Content-Encoding is
        # used.  Fixing that is awkward with the way GZipContentEncoding
        # fits into the pipeline, and with a correct Vary header it is
        # unlikely to cause problems in practice, so tolerate the error.
        return allowed + [rs.VARY_ETAG_DOESNT_CHANGE]
if __name__ == '__main__':
    # Parse tornado's standard command-line flags (logging etc.) before
    # handing control to the unittest runner.
    parse_command_line()
    unittest.main()
|
petwitter/petwitter | refs/heads/master | thesite/manage.py | 2 | #!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    # Point Django at this project's settings module unless the environment
    # already names one, then dispatch to the standard management CLI
    # (runserver, migrate, ...) with the raw command-line arguments.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "thesite.settings")

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
|
calancha/DIRAC | refs/heads/rel-v6r12 | DataManagementSystem/Agent/FTSSubmitAgent.py | 3 | ########################################################################
# $HeadURL$
########################################################################
""" :mod: FTSSubmitAgent
====================
FTS Submit Agent takes files from the TransferDB and submits them to the FTS using
FTSRequest helper class.
:deprecated:
"""
# # imports
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.DataManagementSystem.DB.TransferDB import TransferDB
from DIRAC.DataManagementSystem.Client.FTSRequest import FTSRequest
__RCSID__ = "$Id$"
class FTSSubmitAgent( AgentModule ):
  """
  .. class:: FTSSubmitAgent

  This class is submitting previously scheduled files to the FTS system using helper class FTSRequest.

  Files to be transferred are read from TransferDB.Channel table, only those with Status = 'Waiting'.
  After submission TransferDB.Channel.Status is set to 'Executing'. The rest of state propagation is
  done in FTSMonitorAgent.

  Information about a newly created FTS request is held in the TransferDB.FTSReq (request itself) table,
  TransferDB.FileToFTS (files in request) and TransferDB.FileToCat (files to be registered, for
  failover only).
  """
  # # placeholder for TransferDB reference (set in initialize)
  transferDB = None
  # # maximum number of simultaneous FTS jobs per channel
  maxJobsPerChannel = 2
  # # flag enabling source/target checksum comparison
  cksmTest = False
  # # checksum type used when cksmTest is enabled
  cksmType = ""
  # # default checksum type
  __defaultCksmType = "ADLER32"
  def initialize( self ):
    """ agent's initialisation: create the TransferDB handler and read options

    :param self: self reference
    """
    # # save transferDB handler
    self.transferDB = TransferDB()
    # # read config options
    self.maxJobsPerChannel = self.am_getOption( 'MaxJobsPerChannel', self.maxJobsPerChannel )
    self.log.info( "max jobs/channel = %s" % self.maxJobsPerChannel )
    # # checksum test flag
    self.cksmTest = bool( self.am_getOption( "ChecksumTest", False ) )
    # # checksum type; unknown values fall back to the default (ADLER32)
    if self.cksmTest:
      self.cksmType = str( self.am_getOption( "ChecksumType", self.__defaultCksmType ) ).upper()
      if self.cksmType and self.cksmType not in ( "ADLER32", "MD5", "SHA1" ):
        self.log.warn( "unknown checksum type: %s, will use default %s" % ( self.cksmType, self.__defaultCksmType ) )
        self.cksmType = self.__defaultCksmType
    self.log.info( "checksum test is %s" % ( { True : "enabled using %s checksum" % self.cksmType,
                                               False : "disabled"}[self.cksmTest] ) )
    # This sets the Default Proxy to used as that defined under
    # /Operations/Shifter/DataManager
    # the shifterProxy option in the Configuration can be used to change this default.
    self.am_setOption( 'shifterProxy', 'DataManager' )
    # per-channel cache of LFNs currently in 'Staging' status
    self.filesBeingStaged = {}
    return S_OK()
def execute( self ):
""" execution in one agent's cycle
:param self: self reference
"""
#########################################################################
# Obtain the eligible channels for submission.
self.log.info( 'Obtaining channels eligible for submission.' )
res = self.transferDB.selectChannelsForSubmission( self.maxJobsPerChannel )
if not res['OK']:
self.log.error( "Failed to retrieve channels for submission.", res['Message'] )
return S_OK()
elif not res['Value']:
self.log.info( "FTSSubmitAgent. No channels eligible for submission." )
return S_OK()
channelDicts = res['Value']
self.log.info( 'Found %s eligible channels.' % len( channelDicts ) )
#########################################################################
# Submit to all the eligible waiting channels.
i = 1
for channelDict in channelDicts:
infoStr = "\n\n##################################################################################\n\n"
infoStr = "%sStarting submission loop %s of %s\n\n" % ( infoStr, i, len( channelDicts ) )
self.log.info( infoStr )
res = self.submitTransfer( channelDict )
i += 1
return S_OK()
def submitTransfer( self, channelDict ):
""" create and submit FTS jobs based on information it gets from the DB
:param self: self reference
:param dict channelDict: dict with channel info as read from TransferDB.selectChannelsForSubmission
"""
channelID = channelDict['ChannelID']
filesPerJob = channelDict['NumFiles']
#########################################################################
# Obtain the first files in the selected channel.
self.log.info( "FTSSubmitAgent.submitTransfer: Attempting to obtain files for channel %s: %s to %s"
% ( channelID, channelDict['Source'], channelDict['Destination'] ) )
res = self.transferDB.getFilesForChannel( channelID, 2 * filesPerJob )
if not res['OK']:
errStr = 'FTSSubmitAgent.%s' % res['Message']
self.log.error( errStr )
return S_OK()
if not res['Value']:
self.log.info( "FTSSubmitAgent.submitTransfer: No files found for channel." )
return S_OK()
filesDict = res['Value']
sourceSE = filesDict['SourceSE']
targetSE = filesDict['TargetSE']
self.log.info( 'Obtained %s files for channel %s to %s' % ( len( filesDict['Files'] ), sourceSE, targetSE ) )
if self.filesBeingStaged.get( channelID ):
self.log.info( '%d files are currently in staging status' % len( self.filesBeingStaged[channelID] ) )
# Create the FTSRequest object for preparing the submission
oFTSRequest = FTSRequest()
oFTSRequest.setSourceSE( sourceSE )
oFTSRequest.setTargetSE( targetSE )
files = filesDict['Files']
# # enable/disable cksm test
oFTSRequest.setCksmTest( self.cksmTest )
if self.cksmType:
oFTSRequest.setCksmType( self.cksmType )
#########################################################################
# Populate the FTS Request with the files.
self.log.info( 'Populating the FTS request with file information' )
fileIDs = []
totalSize = 0
fileIDSizes = {}
lfns = set()
for fileMeta in files:
lfn = fileMeta['LFN']
lfns.add( lfn )
oFTSRequest.setLFN( lfn )
oFTSRequest.setSourceSURL( lfn, fileMeta['SourceSURL'] )
oFTSRequest.setTargetSURL( lfn, fileMeta['TargetSURL'] )
if lfn in self.filesBeingStaged.get( channelID, [] ):
oFTSRequest.setStatus( lfn, 'Staging' )
fileID = fileMeta['FileID']
fileIDs.append( fileID )
totalSize += fileMeta['Size']
fileIDSizes[fileID] = fileMeta['Size']
oFTSRequest.resolveSource()
noSource = [ lfn for lfn, fileInfo in oFTSRequest.fileDict.items()
if fileInfo.get( "Status", "" ) == "Failed" and fileInfo.get( "Reason", "" ) in ( "No replica at SourceSE",
"Source file does not exist" ) ]
toReschedule = [fileMeta["FileID"] for fileMeta in files if fileMeta["LFN"] in noSource]
if toReschedule:
self.log.info( "Found %s files to reschedule" % len( toReschedule ) )
for fileID in toReschedule:
res = self.transferDB.setFileToReschedule( fileID )
if not res["OK"]:
self.log.error( "Failed to update Channel table for failed files.", res["Message"] )
elif res["Value"] == "max reschedule attempt reached":
self.log.error( "setting Channel status to 'Failed' : " % res["Value"] )
res = self.transferDB.setFileChannelStatus( channelID, fileID, 'Failed' )
if not res["OK"]:
self.log.error( "Failed to update Channel table for failed files.", res["Message"] )
#########################################################################
# Submit the FTS request and retrieve the FTS GUID/Server
self.log.info( 'Submitting the FTS request' )
res = oFTSRequest.submit()
if not res['OK']:
errStr = "FTSSubmitAgent.submit: %s" % res['Message']
self.log.error( errStr )
self.log.info( 'Updating the Channel table for files to retry' )
res = self.transferDB.resetFileChannelStatus( channelID, fileIDs )
if not res['OK']:
self.log.error( 'Failed to update the Channel table for file to retry.', res['Message'] )
return S_ERROR( errStr )
ftsGUID = res['Value']['ftsGUID']
ftsServer = res['Value']['ftsServer']
nbSubmitted = res['Value']['submittedFiles']
infoStr = """Submitted FTS Job:
FTS Guid: %s
FTS Server: %s
ChannelID: %s
SourceSE: %s
TargetSE: %s
Files: %s
""" % ( ftsGUID, ftsServer, str( channelID ), sourceSE, targetSE, str( nbSubmitted ) )
self.log.info( infoStr )
# # filter out skipped files
failedFiles = oFTSRequest.getFailed()['Value']
stagingFiles = oFTSRequest.getStaging()['Value']
# cache files being staged
self.filesBeingStaged.setdefault( channelID, set() ).update( stagingFiles )
submittedFiles = lfns.difference( failedFiles, stagingFiles )
# files being submitted are staged
self.filesBeingStaged[channelID] -= submittedFiles
failedIDs = set( [ meta["FileID"] for meta in files if meta["LFN"] in failedFiles ] )
stagingIDs = set( [ meta["FileID"] for meta in files if meta["LFN"] in stagingFiles ] )
# # only submitted
submittedIDs = set( fileIDs ) - failedIDs - stagingIDs
# # only count the submitted size
totalSize = sum( [ meta["Size"] for meta in files if meta["FileID"] in submittedIDs ] )
#########################################################################
# Insert the FTS Req details and add the number of files and size
res = self.transferDB.insertFTSReq( ftsGUID, ftsServer, channelID )
if not res['OK']:
errStr = "FTSSubmitAgent.%s" % res['Message']
self.log.error( errStr )
return S_ERROR( errStr )
ftsReqID = res['Value']
self.log.info( 'Obtained FTS RequestID %s' % ftsReqID )
res = self.transferDB.setFTSReqAttribute( ftsReqID, 'SourceSE', sourceSE )
if not res['OK']:
self.log.error( "Failed to set SourceSE for FTSRequest", res['Message'] )
res = self.transferDB.setFTSReqAttribute( ftsReqID, 'TargetSE', targetSE )
if not res['OK']:
self.log.error( "Failed to set TargetSE for FTSRequest", res['Message'] )
res = self.transferDB.setFTSReqAttribute( ftsReqID, 'NumberOfFiles', len( submittedIDs ) )
if not res['OK']:
self.log.error( "Failed to set NumberOfFiles for FTSRequest", res['Message'] )
res = self.transferDB.setFTSReqAttribute( ftsReqID, 'TotalSize', totalSize )
if not res['OK']:
self.log.error( "Failed to set TotalSize for FTSRequest", res['Message'] )
#########################################################################
# Insert the submission event in the FTSReqLogging table
event = 'Submitted'
res = self.transferDB.addLoggingEvent( ftsReqID, event )
if not res['OK']:
errStr = "FTSSubmitAgent.%s" % res['Message']
self.log.error( errStr )
#########################################################################
# Insert the FileToFTS details and remove the files from the channel
self.log.info( 'Setting the files as Executing in the Channel table' )
res = self.transferDB.setChannelFilesExecuting( channelID, list( submittedIDs ) )
if not res['OK']:
self.log.error( 'Failed to update the Channel tables for files.', res['Message'] )
lfns = []
fileToFTSFileAttributes = []
for fileMeta in files:
fileID = fileMeta['FileID']
# Staging is not an error case
if fileID not in stagingIDs:
lfns.append( fileMeta['LFN'] )
fileToFTSFileAttributes.append( ( fileID, fileIDSizes[fileID] ) )
self.log.info( 'Populating the FileToFTS table with file information' )
res = self.transferDB.setFTSReqFiles( ftsReqID, channelID, fileToFTSFileAttributes )
if not res['OK']:
self.log.error( 'Failed to populate the FileToFTS table with files.' )
|
andresriancho/boto | refs/heads/develop | boto/datapipeline/layer1.py | 14 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.datapipeline import exceptions
class DataPipelineConnection(AWSQueryConnection):
    """
    Client for the AWS Data Pipeline web service.

    AWS Data Pipeline configures and manages data-driven workflows
    called pipelines.  The API has two halves: pipeline configuration
    (CreatePipeline, PutPipelineDefinition, ActivatePipeline, ...) used
    to define data sources, schedules, dependencies and transforms, and
    the task-runner actions (PollForTask, ReportTaskProgress,
    SetTaskStatus, ReportTaskRunnerHeartbeat) used by a worker
    application that executes the pipeline's tasks and reports progress
    back to the service.

    Requests are signed with the Signature Version 4 protocol.
    """
    # Service metadata consumed by AWSQueryConnection when building and
    # dispatching requests.
    APIVersion = "2012-10-29"
    DefaultRegionName = "us-east-1"
    DefaultRegionEndpoint = "datapipeline.us-east-1.amazonaws.com"
    ServiceName = "DataPipeline"
    TargetPrefix = "DataPipeline"
    # Exception class raised for JSON-encoded error responses.
    ResponseError = JSONResponseError

    # Maps service fault names to the exception classes raised for them.
    _faults = {
        "PipelineDeletedException": exceptions.PipelineDeletedException,
        "InvalidRequestException": exceptions.InvalidRequestException,
        "TaskNotFoundException": exceptions.TaskNotFoundException,
        "PipelineNotFoundException": exceptions.PipelineNotFoundException,
        "InternalServiceError": exceptions.InternalServiceError,
    }
def __init__(self, **kwargs):
region = kwargs.get('region')
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
kwargs['host'] = region.endpoint
AWSQueryConnection.__init__(self, **kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def activate_pipeline(self, pipeline_id):
"""
Validates a pipeline and initiates processing. If the pipeline
does not pass validation, activation fails.
Call this action to start processing pipeline tasks of a
pipeline you've created using the CreatePipeline and
PutPipelineDefinition actions. A pipeline cannot be modified
after it has been successfully activated.
:type pipeline_id: string
:param pipeline_id: The identifier of the pipeline to activate.
"""
params = {'pipelineId': pipeline_id, }
return self.make_request(action='ActivatePipeline',
body=json.dumps(params))
def create_pipeline(self, name, unique_id, description=None):
"""
Creates a new empty pipeline. When this action succeeds, you
can then use the PutPipelineDefinition action to populate the
pipeline.
:type name: string
:param name: The name of the new pipeline. You can use the same name
for multiple pipelines associated with your AWS account, because
AWS Data Pipeline assigns each new pipeline a unique pipeline
identifier.
:type unique_id: string
:param unique_id: A unique identifier that you specify. This identifier
is not the same as the pipeline identifier assigned by AWS Data
Pipeline. You are responsible for defining the format and ensuring
the uniqueness of this identifier. You use this parameter to ensure
idempotency during repeated calls to CreatePipeline. For example,
if the first call to CreatePipeline does not return a clear
success, you can pass in the same unique identifier and pipeline
name combination on a subsequent call to CreatePipeline.
CreatePipeline ensures that if a pipeline already exists with the
same name and unique identifier, a new pipeline will not be
created. Instead, you'll receive the pipeline identifier from the
previous attempt. The uniqueness of the name and unique identifier
combination is scoped to the AWS account or IAM user credentials.
:type description: string
:param description: The description of the new pipeline.
"""
params = {'name': name, 'uniqueId': unique_id, }
if description is not None:
params['description'] = description
return self.make_request(action='CreatePipeline',
body=json.dumps(params))
def delete_pipeline(self, pipeline_id):
"""
Permanently deletes a pipeline, its pipeline definition and
its run history. You cannot query or restore a deleted
pipeline. AWS Data Pipeline will attempt to cancel instances
associated with the pipeline that are currently being
processed by task runners. Deleting a pipeline cannot be
undone.
To temporarily pause a pipeline instead of deleting it, call
SetStatus with the status set to Pause on individual
components. Components that are paused by SetStatus can be
resumed.
:type pipeline_id: string
:param pipeline_id: The identifier of the pipeline to be deleted.
"""
params = {'pipelineId': pipeline_id, }
return self.make_request(action='DeletePipeline',
body=json.dumps(params))
def describe_objects(self, object_ids, pipeline_id, marker=None,
evaluate_expressions=None):
"""
Returns the object definitions for a set of objects associated
with the pipeline. Object definitions are composed of a set of
fields that define the properties of the object.
:type pipeline_id: string
:param pipeline_id: Identifier of the pipeline that contains the object
definitions.
:type object_ids: list
:param object_ids: Identifiers of the pipeline objects that contain the
definitions to be described. You can pass as many as 25 identifiers
in a single call to DescribeObjects.
:type evaluate_expressions: boolean
:param evaluate_expressions: Indicates whether any expressions in the
object should be evaluated when the object descriptions are
returned.
:type marker: string
:param marker: The starting point for the results to be returned. The
first time you call DescribeObjects, this value should be empty. As
long as the action returns `HasMoreResults` as `True`, you can call
DescribeObjects again and pass the marker value from the response
to retrieve the next set of results.
"""
params = {
'pipelineId': pipeline_id,
'objectIds': object_ids,
}
if evaluate_expressions is not None:
params['evaluateExpressions'] = evaluate_expressions
if marker is not None:
params['marker'] = marker
return self.make_request(action='DescribeObjects',
body=json.dumps(params))
def describe_pipelines(self, pipeline_ids):
"""
Retrieve metadata about one or more pipelines. The information
retrieved includes the name of the pipeline, the pipeline
identifier, its current state, and the user account that owns
the pipeline. Using account credentials, you can retrieve
metadata about pipelines that you or your IAM users have
created. If you are using an IAM user account, you can
retrieve metadata about only those pipelines you have read
permission for.
To retrieve the full pipeline definition instead of metadata
about the pipeline, call the GetPipelineDefinition action.
:type pipeline_ids: list
:param pipeline_ids: Identifiers of the pipelines to describe. You can
pass as many as 25 identifiers in a single call to
DescribePipelines. You can obtain pipeline identifiers by calling
ListPipelines.
"""
params = {'pipelineIds': pipeline_ids, }
return self.make_request(action='DescribePipelines',
body=json.dumps(params))
def evaluate_expression(self, pipeline_id, expression, object_id):
"""
Evaluates a string in the context of a specified object. A
task runner can use this action to evaluate SQL queries stored
in Amazon S3.
:type pipeline_id: string
:param pipeline_id: The identifier of the pipeline.
:type object_id: string
:param object_id: The identifier of the object.
:type expression: string
:param expression: The expression to evaluate.
"""
params = {
'pipelineId': pipeline_id,
'objectId': object_id,
'expression': expression,
}
return self.make_request(action='EvaluateExpression',
body=json.dumps(params))
def get_pipeline_definition(self, pipeline_id, version=None):
"""
Returns the definition of the specified pipeline. You can call
GetPipelineDefinition to retrieve the pipeline definition you
provided using PutPipelineDefinition.
:type pipeline_id: string
:param pipeline_id: The identifier of the pipeline.
:type version: string
:param version: The version of the pipeline definition to retrieve.
This parameter accepts the values `latest` (default) and `active`.
Where `latest` indicates the last definition saved to the pipeline
and `active` indicates the last definition of the pipeline that was
activated.
"""
params = {'pipelineId': pipeline_id, }
if version is not None:
params['version'] = version
return self.make_request(action='GetPipelineDefinition',
body=json.dumps(params))
def list_pipelines(self, marker=None):
"""
Returns a list of pipeline identifiers for all active
pipelines. Identifiers are returned only for pipelines you
have permission to access.
:type marker: string
:param marker: The starting point for the results to be returned. The
first time you call ListPipelines, this value should be empty. As
long as the action returns `HasMoreResults` as `True`, you can call
ListPipelines again and pass the marker value from the response to
retrieve the next set of results.
"""
params = {}
if marker is not None:
params['marker'] = marker
return self.make_request(action='ListPipelines',
body=json.dumps(params))
def poll_for_task(self, worker_group, hostname=None,
instance_identity=None):
"""
Task runners call this action to receive a task to perform
from AWS Data Pipeline. The task runner specifies which tasks
it can perform by setting a value for the workerGroup
parameter of the PollForTask call. The task returned by
PollForTask may come from any of the pipelines that match the
workerGroup value passed in by the task runner and that was
launched using the IAM user credentials specified by the task
runner.
If tasks are ready in the work queue, PollForTask returns a
response immediately. If no tasks are available in the queue,
PollForTask uses long-polling and holds on to a poll
connection for up to a 90 seconds during which time the first
newly scheduled task is handed to the task runner. To
accomodate this, set the socket timeout in your task runner to
90 seconds. The task runner should not call PollForTask again
on the same `workerGroup` until it receives a response, and
this may take up to 90 seconds.
:type worker_group: string
:param worker_group: Indicates the type of task the task runner is
configured to accept and process. The worker group is set as a
field on objects in the pipeline when they are created. You can
only specify a single value for `workerGroup` in the call to
PollForTask. There are no wildcard values permitted in
`workerGroup`, the string must be an exact, case-sensitive, match.
:type hostname: string
:param hostname: The public DNS name of the calling task runner.
:type instance_identity: dict
:param instance_identity: Identity information for the Amazon EC2
instance that is hosting the task runner. You can get this value by
calling the URI, `http://169.254.169.254/latest/meta-data/instance-
id`, from the EC2 instance. For more information, go to `Instance
Metadata`_ in the Amazon Elastic Compute Cloud User Guide. Passing
in this value proves that your task runner is running on an EC2
instance, and ensures the proper AWS Data Pipeline service charges
are applied to your pipeline.
"""
params = {'workerGroup': worker_group, }
if hostname is not None:
params['hostname'] = hostname
if instance_identity is not None:
params['instanceIdentity'] = instance_identity
return self.make_request(action='PollForTask',
body=json.dumps(params))
def put_pipeline_definition(self, pipeline_objects, pipeline_id):
"""
Adds tasks, schedules, and preconditions that control the
behavior of the pipeline. You can use PutPipelineDefinition to
populate a new pipeline or to update an existing pipeline that
has not yet been activated.
PutPipelineDefinition also validates the configuration as it
adds it to the pipeline. Changes to the pipeline are saved
unless one of the following three validation errors exists in
the pipeline.
#. An object is missing a name or identifier field.
#. A string or reference field is empty.
#. The number of objects in the pipeline exceeds the maximum
allowed objects.
Pipeline object definitions are passed to the
PutPipelineDefinition action and returned by the
GetPipelineDefinition action.
:type pipeline_id: string
:param pipeline_id: The identifier of the pipeline to be configured.
:type pipeline_objects: list
:param pipeline_objects: The objects that define the pipeline. These
will overwrite the existing pipeline definition.
"""
params = {
'pipelineId': pipeline_id,
'pipelineObjects': pipeline_objects,
}
return self.make_request(action='PutPipelineDefinition',
body=json.dumps(params))
def query_objects(self, pipeline_id, sphere, marker=None, query=None,
limit=None):
"""
Queries a pipeline for the names of objects that match a
specified set of conditions.
The objects returned by QueryObjects are paginated and then
filtered by the value you set for query. This means the action
may return an empty result set with a value set for marker. If
`HasMoreResults` is set to `True`, you should continue to call
QueryObjects, passing in the returned value for marker, until
`HasMoreResults` returns `False`.
:type pipeline_id: string
:param pipeline_id: Identifier of the pipeline to be queried for object
names.
:type query: dict
:param query: Query that defines the objects to be returned. The Query
object can contain a maximum of ten selectors. The conditions in
the query are limited to top-level String fields in the object.
These filters can be applied to components, instances, and
attempts.
:type sphere: string
:param sphere: Specifies whether the query applies to components or
instances. Allowable values: `COMPONENT`, `INSTANCE`, `ATTEMPT`.
:type marker: string
:param marker: The starting point for the results to be returned. The
first time you call QueryObjects, this value should be empty. As
long as the action returns `HasMoreResults` as `True`, you can call
QueryObjects again and pass the marker value from the response to
retrieve the next set of results.
:type limit: integer
:param limit: Specifies the maximum number of object names that
QueryObjects will return in a single call. The default value is
100.
"""
params = {'pipelineId': pipeline_id, 'sphere': sphere, }
if query is not None:
params['query'] = query
if marker is not None:
params['marker'] = marker
if limit is not None:
params['limit'] = limit
return self.make_request(action='QueryObjects',
body=json.dumps(params))
def report_task_progress(self, task_id):
"""
Updates the AWS Data Pipeline service on the progress of the
calling task runner. When the task runner is assigned a task,
it should call ReportTaskProgress to acknowledge that it has
the task within 2 minutes. If the web service does not recieve
this acknowledgement within the 2 minute window, it will
assign the task in a subsequent PollForTask call. After this
initial acknowledgement, the task runner only needs to report
progress every 15 minutes to maintain its ownership of the
task. You can change this reporting time from 15 minutes by
specifying a `reportProgressTimeout` field in your pipeline.
If a task runner does not report its status after 5 minutes,
AWS Data Pipeline will assume that the task runner is unable
to process the task and will reassign the task in a subsequent
response to PollForTask. task runners should call
ReportTaskProgress every 60 seconds.
:type task_id: string
:param task_id: Identifier of the task assigned to the task runner.
This value is provided in the TaskObject that the service returns
with the response for the PollForTask action.
"""
params = {'taskId': task_id, }
return self.make_request(action='ReportTaskProgress',
body=json.dumps(params))
def report_task_runner_heartbeat(self, taskrunner_id, worker_group=None,
hostname=None):
"""
Task runners call ReportTaskRunnerHeartbeat every 15 minutes
to indicate that they are operational. In the case of AWS Data
Pipeline Task Runner launched on a resource managed by AWS
Data Pipeline, the web service can use this call to detect
when the task runner application has failed and restart a new
instance.
:type taskrunner_id: string
:param taskrunner_id: The identifier of the task runner. This value
should be unique across your AWS account. In the case of AWS Data
Pipeline Task Runner launched on a resource managed by AWS Data
Pipeline, the web service provides a unique identifier when it
launches the application. If you have written a custom task runner,
you should assign a unique identifier for the task runner.
:type worker_group: string
:param worker_group: Indicates the type of task the task runner is
configured to accept and process. The worker group is set as a
field on objects in the pipeline when they are created. You can
only specify a single value for `workerGroup` in the call to
ReportTaskRunnerHeartbeat. There are no wildcard values permitted
in `workerGroup`, the string must be an exact, case-sensitive,
match.
:type hostname: string
:param hostname: The public DNS name of the calling task runner.
"""
params = {'taskrunnerId': taskrunner_id, }
if worker_group is not None:
params['workerGroup'] = worker_group
if hostname is not None:
params['hostname'] = hostname
return self.make_request(action='ReportTaskRunnerHeartbeat',
body=json.dumps(params))
def set_status(self, object_ids, status, pipeline_id):
"""
Requests that the status of an array of physical or logical
pipeline objects be updated in the pipeline. This update may
not occur immediately, but is eventually consistent. The
status that can be set depends on the type of object.
:type pipeline_id: string
:param pipeline_id: Identifies the pipeline that contains the objects.
:type object_ids: list
:param object_ids: Identifies an array of objects. The corresponding
objects can be either physical or components, but not a mix of both
types.
:type status: string
:param status: Specifies the status to be set on all the objects in
`objectIds`. For components, this can be either `PAUSE` or
`RESUME`. For instances, this can be either `CANCEL`, `RERUN`, or
`MARK_FINISHED`.
"""
params = {
'pipelineId': pipeline_id,
'objectIds': object_ids,
'status': status,
}
return self.make_request(action='SetStatus',
body=json.dumps(params))
def set_task_status(self, task_id, task_status, error_id=None,
error_message=None, error_stack_trace=None):
"""
Notifies AWS Data Pipeline that a task is completed and
provides information about the final status. The task runner
calls this action regardless of whether the task was
sucessful. The task runner does not need to call SetTaskStatus
for tasks that are canceled by the web service during a call
to ReportTaskProgress.
:type task_id: string
:param task_id: Identifies the task assigned to the task runner. This
value is set in the TaskObject that is returned by the PollForTask
action.
:type task_status: string
:param task_status: If `FINISHED`, the task successfully completed. If
`FAILED` the task ended unsuccessfully. The `FALSE` value is used
by preconditions.
:type error_id: string
:param error_id: If an error occurred during the task, this value
specifies an id value that represents the error. This value is set
on the physical attempt object. It is used to display error
information to the user. It should not start with string "Service_"
which is reserved by the system.
:type error_message: string
:param error_message: If an error occurred during the task, this value
specifies a text description of the error. This value is set on the
physical attempt object. It is used to display error information to
the user. The web service does not parse this value.
:type error_stack_trace: string
:param error_stack_trace: If an error occurred during the task, this
value specifies the stack trace associated with the error. This
value is set on the physical attempt object. It is used to display
error information to the user. The web service does not parse this
value.
"""
params = {'taskId': task_id, 'taskStatus': task_status, }
if error_id is not None:
params['errorId'] = error_id
if error_message is not None:
params['errorMessage'] = error_message
if error_stack_trace is not None:
params['errorStackTrace'] = error_stack_trace
return self.make_request(action='SetTaskStatus',
body=json.dumps(params))
def validate_pipeline_definition(self, pipeline_objects, pipeline_id):
"""
Tests the pipeline definition with a set of validation checks
to ensure that it is well formed and can run without error.
:type pipeline_id: string
:param pipeline_id: Identifies the pipeline whose definition is to be
validated.
:type pipeline_objects: list
:param pipeline_objects: A list of objects that define the pipeline
changes to validate against the pipeline.
"""
params = {
'pipelineId': pipeline_id,
'pipelineObjects': pipeline_objects,
}
return self.make_request(action='ValidatePipelineDefinition',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read()
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
|
executive-consultants-of-los-angeles/rsum | refs/heads/master | rsum/home/apps.py | 1 | """Home apps module."""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class HomeConfig(AppConfig):
    """Django application configuration for the ``home`` app."""
    # Dotted Python path of the app; used by Django's app registry
    # (referenced from INSTALLED_APPS).
    name = 'home'
|
kevinthesun/mxnet | refs/heads/master | example/reinforcement-learning/dqn/utils.py | 52 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function
import os
import numpy
import json
import sys
import re
import scipy.signal
import logging
import ast
import inspect
import collections
import numbers
try:
import cPickle as pickle
except:
import pickle
from collections import namedtuple, OrderedDict
import time
import mxnet as mx
import mxnet.ndarray as nd
_ctx = mx.cpu()
_numpy_rng = numpy.random.RandomState(123456)
def get_default_ctx():
    """Return the module-wide default MXNet device context (CPU)."""
    return _ctx
def get_numpy_rng():
    """Return the module-wide numpy RandomState (fixed seed 123456 for reproducibility)."""
    return _numpy_rng
def get_saving_path(prefix="", epoch=None):
    """Return (symbol_path, params_path, misc_path) for a checkpoint prefix.

    When `epoch` is given, the params file name embeds it as a
    zero-padded 5-digit suffix.
    """
    sym_saving_path = '%s-symbol.json' % prefix
    if epoch is None:
        param_saving_path = '%s.params' % prefix
    else:
        param_saving_path = '%s-%05d.params' % (prefix, epoch)
    misc_saving_path = '%s-misc.json' % prefix
    return sym_saving_path, param_saving_path, misc_saving_path
def logging_config(name=None, level=logging.DEBUG, console_level=logging.DEBUG):
    """Configure the root logger to log to <cwd>/<name>/<name>.log and the console.

    :param name: Log folder/file base name. Defaults to the *caller's*
        source file name (via inspect.stack()).
    :param level: Level for the root logger and the file handler.
    :param console_level: Level for the console handler.
    :returns: The folder the log file was created in.

    Side effects: creates the folder, and appends handlers to the root
    logger (calling this repeatedly adds duplicate handlers).
    """
    if name is None:
        # inspect.stack()[1][1] is the filename of the calling frame.
        name = inspect.stack()[1][1].split('.')[0]
    folder = os.path.join(os.getcwd(), name)
    if not os.path.exists(folder):
        os.makedirs(folder)
    logpath = os.path.join(folder, name + ".log")
    print("All Logs will be saved to %s" %logpath)
    logging.root.setLevel(level)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logfile = logging.FileHandler(logpath)
    logfile.setLevel(level)
    logfile.setFormatter(formatter)
    logging.root.addHandler(logfile)
    #TODO Update logging patterns in other files
    logconsole = logging.StreamHandler()
    logconsole.setLevel(console_level)
    logconsole.setFormatter(formatter)
    logging.root.addHandler(logconsole)
    return folder
def save_params(dir_path=os.curdir, epoch=None, name="", params=None, aux_states=None,
                ctx=mx.cpu()):
    """Save arg and aux NDArrays to '<dir_path>/<name>[-<epoch>].params'.

    Keys are stored with 'arg:'/'aux:' prefixes (the mx.model convention)
    after copying each array to `ctx`. Returns the path written.
    """
    prefix = os.path.join(dir_path, name)
    _, param_saving_path, _ = get_saving_path(prefix, epoch)
    if not os.path.isdir(dir_path) and not (dir_path == ""):
        os.makedirs(dir_path)
    save_dict = {}
    for key, value in params.items():
        save_dict['arg:%s' % key] = value.copyto(ctx)
    for key, value in aux_states.items():
        save_dict['aux:%s' % key] = value.copyto(ctx)
    nd.save(param_saving_path, save_dict)
    return param_saving_path
def save_misc(dir_path=os.curdir, epoch=None, name="", content=None):
    """Write `content` as JSON to the checkpoint's '-misc.json' path; return the path."""
    prefix = os.path.join(dir_path, name)
    _, _, misc_saving_path = get_saving_path(prefix, epoch)
    with open(misc_saving_path, 'w') as handle:
        json.dump(content, handle)
    return misc_saving_path
def quick_save_json(dir_path=os.curdir, file_name="", content=None):
    """Dump `content` as JSON to dir_path/file_name, creating dir_path if missing."""
    target = os.path.join(dir_path, file_name)
    if not os.path.isdir(dir_path):
        os.makedirs(dir_path)
    with open(target, 'w') as handle:
        json.dump(content, handle)
    logging.info('Save json into %s' % target)
def safe_eval(expr):
    """Safely evaluate a Python literal.

    :param expr: A string containing a Python literal (number, list,
        dict, ...) or any non-string value.
    :returns: The parsed literal if `expr` is a string, otherwise `expr`
        unchanged.

    Uses ast.literal_eval, which never executes code (unlike eval).
    """
    # isinstance (rather than `type(expr) is str`) also accepts str
    # subclasses, which is the idiomatic and safer check.
    if isinstance(expr, str):
        return ast.literal_eval(expr)
    return expr
def norm_clipping(params_grad, threshold):
    """Rescale all gradients in-place so their global L2 norm is at most `threshold`.

    :param params_grad: dict of name -> gradient NDArray (mutated in place).
    :param threshold: maximum allowed global norm.
    :returns: the global norm *before* clipping.
    """
    assert isinstance(params_grad, dict)
    squared = [nd.norm(g).asnumpy()[0] ** 2 for g in params_grad.values()]
    norm_val = numpy.sqrt(sum(squared))
    if norm_val > threshold:
        scale = threshold / norm_val
        for g in params_grad.values():
            g *= scale
    return norm_val
def sample_categorical(prob, rng):
    """Sample once from each row's categorical distribution.

    Parameters
    ----------
    prob : numpy.ndarray
        Per-row category probabilities, shape (batch_num, category_num).
    rng : numpy.random.RandomState

    Returns
    -------
    ret : numpy.ndarray
        Sampled category indices as float32, shape (batch_num,).
    """
    batch_num, category_num = prob.shape
    samples = numpy.empty(batch_num, dtype=numpy.float32)
    # Clip keeps the index inside [0, category_num - 1] even when
    # rounding pushes searchsorted past the last bucket.
    upper = category_num - 0.5
    for row in range(batch_num):
        cdf = numpy.cumsum(prob[row])
        samples[row] = numpy.searchsorted(cdf, rng.rand()).clip(min=0.0, max=upper)
    return samples
def sample_normal(mean, var, rng):
    """Sample elementwise from independent normal distributions.

    Parameters
    ----------
    mean : numpy.ndarray
        Means, shape (batch_num, sample_dim).
    var : numpy.ndarray
        Variances, shape (batch_num, sample_dim).
    rng : numpy.random.RandomState

    Returns
    -------
    ret : numpy.ndarray
        Samples, shape (batch_num, sample_dim).
    """
    noise = rng.randn(*mean.shape)
    return numpy.sqrt(var) * noise + mean
def sample_mog(prob, mean, var, rng):
    """Sample one point per batch from a mixture-of-Gaussians distribution.

    Parameters
    ----------
    prob : numpy.ndarray
        Mixture weights, shape (batch_num, center_num).
    mean : numpy.ndarray
        Component means, shape (batch_num, center_num, sample_dim).
    var : numpy.ndarray
        Component variances, shape (batch_num, center_num, sample_dim).
    rng : numpy.random.RandomState

    Returns
    -------
    ret : numpy.ndarray
        Samples, shape (batch_num, sample_dim).
    """
    # First pick a component per batch, then sample from that Gaussian.
    chosen = sample_categorical(prob, rng).astype(numpy.int32)
    rows = numpy.arange(mean.shape[0])
    chosen_mean = mean[rows, chosen, :]
    chosen_var = var[rows, chosen, :]
    return sample_normal(mean=chosen_mean, var=chosen_var, rng=rng)
def npy_softmax(x, axis=1):
    """Numerically stable softmax of `x` along `axis` (max is subtracted first)."""
    shifted = x - numpy.max(x, axis=axis, keepdims=True)
    exps = numpy.exp(shifted)
    return exps / exps.sum(axis=axis, keepdims=True)
def npy_sigmoid(x):
    """Elementwise logistic sigmoid: 1 / (1 + exp(-x))."""
    denominator = 1 + numpy.exp(-x)
    return 1 / denominator
def npy_onehot(x, num):
    """One-hot encode integer array `x` into `num` classes.

    Output has shape ``x.shape + (num,)`` with a 1.0 at each index.
    """
    flat = numpy.zeros(shape=(x.size, num))
    flat[numpy.arange(x.size), x.ravel()] = 1
    return flat.reshape(x.shape + (num,))
def npy_binary_entropy(prediction, target):
    """Total (summed) binary cross-entropy between `prediction` and `target`.

    A 1e-9 epsilon inside the logs avoids log(0).
    """
    assert prediction.shape == target.shape
    positive_term = numpy.log(prediction + 1E-9) * target
    negative_term = numpy.log(1 - prediction + 1E-9) * (1 - target)
    return - (positive_term + negative_term).sum()
def block_all(sym_list):
    """Wrap every symbol in BlockGrad so no gradient flows through them."""
    blocked = []
    for sym in sym_list:
        blocked.append(mx.symbol.BlockGrad(sym))
    return blocked
def load_params(dir_path="", epoch=None, name=""):
    """Load arg/aux parameter dicts from a checkpoint, waiting for the file.

    NOTE(review): if the file never appears this polls forever,
    sleeping 60s between attempts (original behavior, preserved).

    :returns: (arg_params, aux_params, path_loaded_from)
    """
    prefix = os.path.join(dir_path, name)
    _, param_loading_path, _ = get_saving_path(prefix, epoch)
    while not os.path.isfile(param_loading_path):
        logging.info("in load_param, %s Not Found!" % param_loading_path)
        time.sleep(60)
    save_dict = nd.load(param_loading_path)
    arg_params = {}
    aux_params = {}
    # Keys are stored as 'arg:<name>' / 'aux:<name>'.
    for full_key, value in save_dict.items():
        kind, param_name = full_key.split(':', 1)
        if kind == 'arg':
            arg_params[param_name] = value
        if kind == 'aux':
            aux_params[param_name] = value
    return arg_params, aux_params, param_loading_path
def load_misc(dir_path="", epoch=None, name=""):
    """Load and return the JSON misc blob saved next to a checkpoint."""
    prefix = os.path.join(dir_path, name)
    _, _, misc_saving_path = get_saving_path(prefix, epoch)
    with open(misc_saving_path, 'r') as fp:
        return json.load(fp)
def load_npz(path):
    """Load a .npz archive into a plain dict of arrays (file handle is closed)."""
    with numpy.load(path) as data:
        return {key: data[key] for key in data.keys()}
def discount_cumsum(x, discount):
    """Discounted cumulative sum along axis 0: y[t] = sum_k discount**k * x[t+k].

    Implemented as an IIR filter over the reversed sequence
    (y[t] - discount*y[t+1] = x[t]); see the scipy.signal.lfilter
    difference-equation docs.
    """
    reversed_x = x[::-1]
    filtered = scipy.signal.lfilter([1], [1, -discount], reversed_x, axis=0)
    return filtered[::-1]
def discount_return(x, discount):
    """Total discounted return: sum_t discount**t * x[t]."""
    weights = discount ** numpy.arange(len(x))
    return numpy.sum(x * weights)
def update_on_kvstore(kv, params, params_grad):
    """Push each gradient to the kvstore and pull the updated weight back.

    Keys are pushed/pulled by positional index with priority -index so
    earlier parameters are transferred first.
    """
    for index, key in enumerate(params.keys()):
        kv.push(index, params_grad[key], priority=-index)
        kv.pull(index, params[key], priority=-index)
def parse_ctx(ctx_args):
    """Parse a device spec string like "gpu0,gpu1" or "cpu" into pairs.

    :param ctx_args: String containing device names with optional
        trailing device numbers.
    :returns: List of (device_name, device_id) tuples; a missing number
        defaults to id 0.
    """
    # Raw string: '\d' in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern Python 3).
    found = re.findall(r'([a-z]+)(\d*)', ctx_args)
    return [(device, int(num)) if len(num) > 0 else (device, 0)
            for device, num in found]
def get_npy_list(ndarray_list):
    """Convert a list of MXNet NDArrays into a list of numpy arrays.

    Parameters
    ----------
    ndarray_list : list of NDArray

    Returns
    -------
    ret : list of numpy.ndarray
    """
    converted = []
    for arr in ndarray_list:
        converted.append(arr.asnumpy())
    return converted
def get_sym_list(syms, default_names=None, default_shapes=None):
    """Normalize `syms` into a list of MXNet symbols.

    - If `syms` is None and `default_names` is given, fresh Variables are
      created (with `default_shapes` if also given).
    - A single Symbol is wrapped into a one-element list.
    - A list/tuple is copied; its length must match `default_names`
      when provided, otherwise ValueError is raised.
    """
    if syms is None and default_names is not None:
        if default_shapes is not None:
            return [mx.sym.Variable(name=name, shape=shape) for (name, shape)
                    in zip(default_names, default_shapes)]
        else:
            return [mx.sym.Variable(name=name) for name in default_names]
    assert isinstance(syms, (list, tuple, mx.symbol.Symbol))
    if isinstance(syms, (list, tuple)):
        # Length must agree with the expected names, if any were given.
        if default_names is not None and len(syms) != len(default_names):
            raise ValueError("Size of symbols do not match expectation. Received %d, Expected %d. "
                             "syms=%s, names=%s" %(len(syms), len(default_names),
                                                   str(list(sym.name for sym in syms)),
                                                   str(default_names)))
        return list(syms)
    else:
        # A bare Symbol only matches an expectation of exactly one name.
        if default_names is not None and len(default_names) != 1:
            raise ValueError("Size of symbols do not match expectation. Received 1, Expected %d. "
                             "syms=%s, names=%s"
                             % (len(default_names), str([syms.name]), str(default_names)))
        return [syms]
def get_numeric_list(values, typ, expected_len=None):
    """Coerce a scalar or sequence into a list of `typ` values.

    A scalar is broadcast to `expected_len` copies (or a single-element
    list). A list/tuple is converted elementwise; on a ValueError during
    conversion the process prints a message and exits (original
    behavior, preserved). Any other type raises ValueError.
    """
    if isinstance(values, numbers.Number):
        count = expected_len if expected_len is not None else 1
        return [typ(values)] * count
    if isinstance(values, (list, tuple)):
        if expected_len is not None:
            assert len(values) == expected_len
        try:
            return [typ(element) for element in values]
        except(ValueError):
            print("Need iterable with numeric elements, received: %s" %str(values))
            sys.exit(1)
    else:
        raise ValueError("Unaccepted value type, values=%s" %str(values))
def get_int_list(values, expected_len=None):
    """Coerce a scalar or sequence into a list of numpy.int32 (see get_numeric_list)."""
    return get_numeric_list(values, numpy.int32, expected_len)
def get_float_list(values, expected_len=None):
    """Coerce a scalar or sequence into a list of numpy.float32 (see get_numeric_list)."""
    return get_numeric_list(values, numpy.float32, expected_len)
def get_bucket_key(bucket_kwargs):
    """Turn a kwargs dict into a hashable tuple of (key, value) pairs."""
    assert isinstance(bucket_kwargs, dict)
    return tuple((key, value) for key, value in bucket_kwargs.items())
|
dgjustice/ansible | refs/heads/devel | lib/ansible/module_utils/_text.py | 62 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Toshio Kuratomi <a.badger@gmail.com>, 2016
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
.. warn:: This module_util is currently internal implementation.
We want to evaluate this code for stability and API suitability before
making backwards compatibility guarantees. The API may change between
releases. Do not use this unless you are willing to port your module code.
"""
import codecs
from ansible.module_utils.six import PY3, text_type, binary_type
try:
codecs.lookup_error('surrogateescape')
HAS_SURROGATEESCAPE = True
except LookupError:
HAS_SURROGATEESCAPE = False
_COMPOSED_ERROR_HANDLERS = frozenset((None, 'surrogate_or_escape',
'surrogate_or_strict',
'surrogate_then_replace'))
def to_bytes(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
    """Make sure that a string is a byte string.

    :arg obj: Object to convert. Usually a text or byte string; with the
        default ``nonstring='simplerepr'`` this also works as a
        traceback-free version of ``str(obj)``.
    :kwarg encoding: Encoding used to transform text to bytes.
        Defaults to 'utf-8'.
    :kwarg errors: Codec error handler. Any valid codecs error handler is
        accepted, plus composed porting aids:

        :surrogate_or_strict: ``surrogateescape`` if registered, else
            ``strict``.
        :surrogate_then_replace: ``surrogateescape`` if registered; if
            encoding would still traceback, surrogates are replaced and
            the remainder encoded with ``replace``, so this never
            tracebacks. Default from Ansible 2.3 on.

        ``surrogateescape`` availability is checked once at module import
        (HAS_SURROGATEESCAPE); register a Python 2 backport before
        importing this module.
    :kwarg nonstring: Strategy for non-string ``obj``: ``simplerepr``
        (default; bytes of ``str(obj)``), ``empty`` (empty byte string),
        ``passthru`` (return unchanged), ``strict`` (raise TypeError).
    :returns: Typically a byte string; ``passthru``/``empty`` may return
        other types. Never returns a text string.

    .. note:: A byte string input is returned unchanged and is NOT
        validated against ``encoding``. To enforce the encoding do::

            encoded_string = to_bytes(to_text(input_string, 'latin-1'), 'utf-8')
    """
    if isinstance(obj, binary_type):
        return obj

    # We're given a text string
    # If it has surrogates, we know because it will decode
    original_errors = errors
    # Map the composed (porting-aid) handler names onto real codec
    # handlers before use.
    # NOTE(review): _COMPOSED_ERROR_HANDLERS contains
    # 'surrogate_or_escape' while other docs advertise
    # 'surrogate_or_replace' -- one spelling looks like a typo; confirm
    # against callers before relying on either.
    if errors in _COMPOSED_ERROR_HANDLERS:
        if HAS_SURROGATEESCAPE:
            errors = 'surrogateescape'
        elif errors == 'surrogate_or_strict':
            errors = 'strict'
        else:
            errors = 'replace'

    if isinstance(obj, text_type):
        try:
            # Try this first as it's the fastest
            return obj.encode(encoding, errors)
        except UnicodeEncodeError:
            if original_errors in (None, 'surrogate_then_replace'):
                # Slow but works: strip surrogates via a utf-8
                # round-trip with 'replace', then encode the result.
                return_string = obj.encode('utf-8', 'surrogateescape')
                return_string = return_string.decode('utf-8', 'replace')
                return return_string.encode(encoding, 'replace')
            raise

    # Note: We do these last even though we have to call to_bytes again on the
    # value because we're optimizing the common case
    if nonstring == 'simplerepr':
        try:
            value = str(obj)
        except UnicodeError:
            try:
                value = repr(obj)
            except UnicodeError:
                # Giving up
                return to_bytes('')
    elif nonstring == 'passthru':
        return obj
    elif nonstring == 'empty':
        # python2.4 doesn't have b''
        return to_bytes('')
    elif nonstring == 'strict':
        raise TypeError('obj must be a string type')
    else:
        raise TypeError('Invalid value %s for to_bytes\' nonstring parameter' % nonstring)

    return to_bytes(value, encoding, errors)
def to_text(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
    """Make sure that a string is a text string.

    :arg obj: Object to convert. Usually a text or byte string; with the
        default ``nonstring='simplerepr'`` this also works as a
        traceback-free version of ``str(obj)``.
    :kwarg encoding: Encoding used to transform bytes to text.
        Defaults to 'utf-8'.
    :kwarg errors: Codec error handler. Any valid codecs error handler is
        accepted, plus composed porting aids mirroring
        :func:`to_bytes`:

        :surrogate_or_strict: ``surrogateescape`` if registered, else
            ``strict``.
        :surrogate_then_replace: same as the above fallback here; exists
            for symmetry with :func:`to_bytes`. Default from
            Ansible 2.3 on.

        ``surrogateescape`` availability is checked once at module import
        (HAS_SURROGATEESCAPE); register a Python 2 backport before
        importing this module.
    :kwarg nonstring: Strategy for non-string ``obj``: ``simplerepr``
        (default; text of ``str(obj)``), ``empty`` (empty text string),
        ``passthru`` (return unchanged), ``strict`` (raise TypeError).
    :returns: Typically a text string; ``passthru``/``empty`` may return
        other types. Never returns a byte string.
    """
    if isinstance(obj, text_type):
        return obj

    # Map the composed (porting-aid) handler names onto real codec
    # handlers before use (same mapping as to_bytes).
    if errors in _COMPOSED_ERROR_HANDLERS:
        if HAS_SURROGATEESCAPE:
            errors = 'surrogateescape'
        elif errors == 'surrogate_or_strict':
            errors = 'strict'
        else:
            errors = 'replace'

    if isinstance(obj, binary_type):
        # Note: We don't need special handling for surrogate_then_replace
        # because all bytes will either be made into surrogates or are valid
        # to decode.
        return obj.decode(encoding, errors)

    # Note: We do these last even though we have to call to_text again on the
    # value because we're optimizing the common case
    if nonstring == 'simplerepr':
        try:
            value = str(obj)
        except UnicodeError:
            try:
                value = repr(obj)
            except UnicodeError:
                # Giving up
                return u''
    elif nonstring == 'passthru':
        return obj
    elif nonstring == 'empty':
        return u''
    elif nonstring == 'strict':
        raise TypeError('obj must be a string type')
    else:
        raise TypeError('Invalid value %s for to_text\'s nonstring parameter' % nonstring)

    return to_text(value, encoding, errors)
#: :py:func:`to_native`
#: Transform a variable into the native str type for the python version
#:
#: On Python2, this is an alias for
#: :func:`~ansible.module_utils.to_bytes`. On Python3 it is an alias for
#: :func:`~ansible.module_utils.to_text`. It makes it easier to
#: transform a variable into the native str type for the python version
#: the code is running on. Use this when constructing the message to
#: send to exceptions or when dealing with an API that needs to take
#: a native string. Example::
#:
#: try:
#: 1//0
#: except ZeroDivisionError as e:
#: raise MyException('Encountered and error: %s' % to_native(e))
# Bind the alias once at import time based on the interpreter's major
# version: native `str` is text on Python 3 and bytes on Python 2.
if PY3:
    to_native = to_text
else:
    to_native = to_bytes
|
dymkowsk/mantid | refs/heads/master | Framework/PythonInterface/test/python/plugins/algorithms/CorrectTOFTest.py | 3 | from __future__ import (absolute_import, division, print_function)
import unittest
from mantid.simpleapi import CreateSampleWorkspace, CloneWorkspace, GroupWorkspaces, AddSampleLogMultiple, CreateEmptyTableWorkspace
from testhelpers import run_algorithm
from mantid.api import AnalysisDataService, WorkspaceGroup
from scipy.constants import h, m_n, eV
import numpy as np
class CorrectTOFTest(unittest.TestCase):
    """Unit tests for the CorrectTOF Mantid algorithm (EPP-based TOF shift)."""

    def setUp(self):
        """Build the shared fixtures: a 2-bank sample workspace and an EPP table."""
        # create sample workspace
        # TOF axis offsets are relative to TOF1 = 2123.33867005 (set as a log below).
        self.xmin = 2123.33867005 + 4005.75
        self.xmax = 2123.33867005 + 7995.75
        self._input_ws = CreateSampleWorkspace(Function="User Defined", UserDefinedFunction="name=LinearBackground, \
A0=0.3;name=Gaussian, PeakCentre=8190, Height=5, Sigma=75", NumBanks=2,
                                               BankPixelWidth=1, XMin=self.xmin, XMax=self.xmax, BinWidth=10.5,
                                               BankDistanceFromSample=4.0, SourceDistanceFromSample=1.4, OutputWorkspace="ws")
        lognames = "wavelength,TOF1"
        logvalues = "6.0,2123.33867005"
        AddSampleLogMultiple(self._input_ws, lognames, logvalues)
        # create EPP table
        # One PeakCentre row per spectrum (2 banks x 1 pixel).
        self._table = CreateEmptyTableWorkspace(OutputWorkspace="epptable")
        self._table.addColumn(type="double", name="PeakCentre")
        table_row = {'PeakCentre': 8189.5}
        for i in range(2):
            self._table.addRow(table_row)

    def tearDown(self):
        """Remove the fixture workspaces from the ADS after each test."""
        for wsname in ['ws', 'epptable']:
            if AnalysisDataService.doesExist(wsname):
                run_algorithm("DeleteWorkspace", Workspace=wsname)

    def testCorrection(self):
        """X values must be shifted so the elastic peak lands at the elastic TOF."""
        # tests that correction is done properly
        OutputWorkspaceName = "outputws1"
        alg_test = run_algorithm("CorrectTOF", InputWorkspace=self._input_ws, EPPTable=self._table, OutputWorkspace=OutputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())
        wsoutput = AnalysisDataService.retrieve(OutputWorkspaceName)
        # Expected elastic TOF for 6 A neutrons over the 4 m sample-detector distance.
        velocity = h/(m_n*6.0e-10)
        t_el = 4.0e+6/velocity
        t_corr = np.arange(self.xmin, self.xmax + 1.0, 10.5) + t_el - (8189.5 - 2123.33867005)
        self.assertTrue(np.allclose(t_corr, wsoutput.readX(0)))        #sdd = 4
        self.assertTrue(np.allclose(t_corr + t_el, wsoutput.readX(1))) #sdd = 8
        run_algorithm("DeleteWorkspace", Workspace=wsoutput)

    def testGroup(self):
        """The algorithm must accept a WorkspaceGroup input and return a group."""
        # tests whether the group of workspaces is accepted as an input
        ws2 = CloneWorkspace(self._input_ws)
        group = GroupWorkspaces([self._input_ws, ws2])
        OutputWorkspaceName = "output_wsgroup"
        alg_test = run_algorithm("CorrectTOF", InputWorkspace='group', EPPTable=self._table, OutputWorkspace=OutputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())
        wsoutput = AnalysisDataService.retrieve(OutputWorkspaceName)
        self.assertTrue(isinstance(wsoutput, WorkspaceGroup))
        self.assertEqual(2, wsoutput.getNumberOfEntries())
        run_algorithm("DeleteWorkspace", Workspace=group)
        run_algorithm("DeleteWorkspace", Workspace=wsoutput)

    def testConvertUnits(self):
        """CorrectTOF + ConvertUnits + ConvertToDistribution must reproduce the direct dE conversion."""
        # test whether CorrectTof+ConvertUnits+ConvertToDistribution will give the same result as TOFTOFConvertTOFToDeltaE
        OutputWorkspaceName = "outputws1"
        alg_test = run_algorithm("CorrectTOF", InputWorkspace=self._input_ws, EPPTable=self._table, OutputWorkspace=OutputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())
        wscorr = AnalysisDataService.retrieve(OutputWorkspaceName)

        # convert units, convert to distribution
        alg_cu = run_algorithm("ConvertUnits", InputWorkspace=wscorr, Target='DeltaE', EMode='Direct', EFixed=2.27, OutputWorkspace=OutputWorkspaceName+'_dE')
        ws_dE = AnalysisDataService.retrieve(OutputWorkspaceName+'_dE')
        alg_cd = run_algorithm("ConvertToDistribution", Workspace=ws_dE)

        # create reference data for X axis
        tof1 = 2123.33867005
        dataX = self._input_ws.readX(0) - tof1
        tel = 8189.5 - tof1
        factor = m_n*1e+15/eV
        newX = 0.5*factor*16.0*(1/tel**2 - 1/dataX**2)
        # compare
        # self.assertEqual(newX[0], ws_dE.readX(0)[0])
        self.assertTrue(np.allclose(newX, ws_dE.readX(0), atol=0.01))

        # create reference data for Y axis and compare to the output
        tof = dataX[:-1] + 5.25
        newY = self._input_ws.readY(0)*tof**3/(factor*10.5*16.0)
        # compare
        self.assertTrue(np.allclose(newY, ws_dE.readY(0), rtol=0.01))
        run_algorithm("DeleteWorkspace", Workspace=ws_dE)
        run_algorithm("DeleteWorkspace", Workspace=wscorr)
if __name__ == "__main__":
unittest.main()
|
dwighthubbard/explorer-hat | refs/heads/master | explorerhat/captouch.py | 1 | """Cap-touch Driver Library for Microchip CAP1xxx ICs
Supports communication over i2c only.
Currently supported ICs:
CAP1208 - 8 Inputs
CAP1188 - 8 Inputs, 8 LEDs
"""
from smbus import SMBus
import atexit
import signal
import sys
import threading
import time
# DEVICE MAP
DEFAULT_ADDR = 0x28
# Supported devices
PID_CAP1208 = 0b01101011
PID_CAP1188 = 0b01010000
# REGISTER MAP
R_MAIN_CONTROL = 0x00
R_GENERAL_STATUS = 0x02
R_INPUT_STATUS = 0x03
R_LED_STATUS = 0x04
R_NOISE_FLAG_STATUS = 0x0A
# Read-only delta counts for all inputs
R_INPUT_1_DELTA = 0x10
R_INPUT_2_DELTA = 0x11
R_INPUT_3_DELTA = 0x12
R_INPUT_4_DELTA = 0x13
R_INPUT_5_DELTA = 0x14
R_INPUT_6_DELTA = 0x15
R_INPUT_7_DELTA = 0x16
R_INPUT_8_DELTA = 0x17
R_SENSITIVITY = 0x1F
R_GENERAL_CONFIG = 0x20
R_INPUT_ENABLE = 0x21
R_INPUT_CONFIG = 0x22
R_INPUT_CONFIG2 = 0x23 # Default 0x00000111
# Values for bits 3 to 0 of R_INPUT_CONFIG2
# Determines minimum amount of time before
# a "press and hold" event is detected.
# Also - Values for bits 3 to 0 of R_INPUT_CONFIG
# Determines rate at which interrupt will repeat
#
# Resolution of 35ms, max = 35 + (35 * 0b1111) = 560ms
R_SAMPLING_CONFIG = 0x24 # Default 0x00111001
R_CALIBRATION = 0x26 # Default 0b00000000
R_INTERRUPT_EN = 0x27 # Default 0b11111111
R_REPEAT_EN = 0x28 # Default 0b11111111
R_MTOUCH_CONFIG = 0x2A # Default 0b11111111
R_MTOUCH_PAT_CONF = 0x2B
R_MTOUCH_PATTERN = 0x2D
R_COUNT_O_LIMIT = 0x2E
R_RECALIBRATION = 0x2F
# R/W Touch detection thresholds for inputs -- one register per input,
# 0x30..0x37 in order (CAP1xxx register map).
R_INPUT_1_THRESH  = 0x30
R_INPUT_2_THRESH  = 0x31
R_INPUT_3_THRESH  = 0x32
R_INPUT_4_THRESH  = 0x33
# BUGFIX: this line previously re-assigned R_INPUT_4_THRESH to 0x34,
# leaving R_INPUT_5_THRESH undefined and pointing input 4 at input 5's
# register.
R_INPUT_5_THRESH  = 0x34
R_INPUT_6_THRESH  = 0x35
R_INPUT_7_THRESH  = 0x36
R_INPUT_8_THRESH  = 0x37
# R/W Noise threshold for all inputs
R_NOISE_THRESH = 0x38
# R/W Standby and Config Registers
R_STANDBY_CHANNEL = 0x40
R_STANDBY_CONFIG = 0x41
R_STANDBY_SENS = 0x42
R_STANDBY_THRESH = 0x43
R_CONFIGURATION2 = 0x44
# Read-only reference counts for sensor inputs
R_INPUT_1_BCOUNT = 0x50
R_INPUT_2_BCOUNT = 0x51
R_INPUT_3_BCOUNT = 0x52
R_INPUT_4_BCOUNT = 0x53
R_INPUT_5_BCOUNT = 0x54
R_INPUT_6_BCOUNT = 0x55
R_INPUT_7_BCOUNT = 0x56
R_INPUT_8_BCOUNT = 0x57
# LED Controls - For CAP1188 and similar
R_LED_OUTPUT_TYPE = 0x71
R_LED_LINKING = 0x72
R_LED_POLARITY = 0x73
R_LED_OUTPUT_CON = 0x74
R_LED_LTRANS_CON = 0x77
R_LED_MIRROR_CON = 0x79
# LED Behaviour
R_LED_BEHAVIOUR_1 = 0x81 # For LEDs 1-4
R_LED_BEHAVIOUR_2 = 0x82 # For LEDs 5-8
R_LED_PULSE_1_PER = 0x84
R_LED_PULSE_2_PER = 0x85
R_LED_BREATHE_PER = 0x86
R_LED_CONFIG = 0x88
R_LED_PULSE_1_DUT = 0x90
R_LED_PULSE_2_DUT = 0x91
R_LED_BREATHE_DUT = 0x92
R_LED_DIRECT_DUT = 0x93
R_LED_DIRECT_RAMP = 0x94
R_LED_OFF_DELAY = 0x95
# R/W Power buttonc ontrol
R_POWER_BUTTON = 0x60
R_POW_BUTTON_CONF = 0x61
# Read-only upper 8-bit calibration values for sensors
R_INPUT_1_CALIB = 0xB1
R_INPUT_2_CALIB = 0xB2
R_INPUT_3_CALIB = 0xB3
R_INPUT_4_CALIB = 0xB4
R_INPUT_5_CALIB = 0xB5
R_INPUT_6_CALIB = 0xB6
R_INPUT_7_CALIB = 0xB7
R_INPUT_8_CALIB = 0xB8
# Read-only 2 LSBs for each sensor input
R_INPUT_CAL_LSB1 = 0xB9
R_INPUT_CAL_LSB2 = 0xBA
# Product ID Registers
R_PRODUCT_ID = 0xFD
R_MANUFACTURER_ID = 0xFE
R_REVISION = 0xFF
## Basic stoppable thread wrapper
#
# Adds Event for stopping the execution loop
# and exiting cleanly.
class StoppableThread(threading.Thread):
    """Daemon thread with a stop Event so subclasses can exit their loop cleanly."""

    def __init__(self):
        threading.Thread.__init__(self)
        self.stop_event = threading.Event()  # set to request termination
        self.daemon = True  # don't block interpreter shutdown

    def start(self):
        """Start the thread unless it is already running."""
        # FIX: use is_alive() -- the isAlive() alias was removed in Python 3.9.
        if not self.is_alive():
            self.stop_event.clear()
            threading.Thread.start(self)

    def stop(self):
        """Signal the thread to stop and wait for it to terminate."""
        if self.is_alive():
            # set event to signal thread to terminate
            self.stop_event.set()
            # block calling thread until thread really has terminated
            self.join()
## Basic thread wrapper class for asynchronously running functions
#
# Basic thread wrapper class for running functions
# asynchronously. Return False from your function
# to abort looping.
class AsyncWorker(StoppableThread):
    """Repeatedly invoke `todo` until it returns False or stop() is called."""

    def __init__(self, todo):
        StoppableThread.__init__(self)
        self.todo = todo  # callable invoked once per loop iteration

    def run(self):
        while not self.stop_event.is_set():
            # Deliberately `== False` (not `not ...`): only an explicit
            # False return aborts the loop; None/0 keep it running as the
            # original contract allowed.
            if self.todo() == False:
                self.stop_event.set()
                break
class Cap1xxx():
    """Driver for Microchip CAP1xxx capacitive touch sensors over I2C.

    Tracks per-input press/held/release state by polling the chip's
    interrupt flag and dispatches registered handlers, optionally from a
    background AsyncWorker thread.
    """
    supported = [PID_CAP1208, PID_CAP1188]

    def __init__(self, i2c_addr=DEFAULT_ADDR, i2c_bus=1, on_touch=None):
        """Initialise the sensor with all 8 inputs and interrupts enabled.

        :param i2c_addr: I2C address of the device
        :param i2c_bus: number of the I2C bus to open
        :param on_touch: optional list of 8 per-channel touch callbacks
        :raises Exception: if the chip's product ID is not supported
        """
        # FIX: the default argument used to be a shared mutable list
        # ([None] * 8); create a fresh list per instance instead.
        if on_touch is None:
            on_touch = [None] * 8
        self.async_poll = None
        self.i2c_addr = i2c_addr
        self.i2c = SMBus(i2c_bus)
        self.count = 0
        self._delta = 50  # fallback touch delta (see set_touch_delta)
        # One handler slot per event type per input channel.
        self.handlers = {
            'press': [None] * 8,
            'release': [None] * 8,
            'held': [None] * 8
        }
        self.touch_handlers = on_touch
        self.last_input_status = [False] * 8
        self.input_status = ['none'] * 8
        self.input_pressed = [False] * 8
        self.repeat_enabled = 0b00000000
        self.release_enabled = 0b11111111
        self.product_id = self._get_product_id()
        if self.product_id not in self.supported:
            raise Exception("Product ID {} not supported!".format(self.product_id))
        # Enable all inputs with interrupt by default
        self.enable_inputs(0b11111111)
        self.enable_interrupts(0b11111111)
        # Disable repeat for all channels, but give
        # it sane defaults anyway
        self.enable_repeat(0b00000000)
        self.enable_multitouch(True)
        self.set_hold_delay(210)
        self.set_repeat_rate(210)
        # Make sure the polling thread is stopped at interpreter exit.
        atexit.register(self.stop_watching)

    def get_input_status(self):
        """Get the status of all inputs.

        Returns a list of 8 strings -- 'none', 'press', 'held' or
        'release' -- describing each input since the interrupt flag was
        last cleared.
        """
        touched = self._read_byte(R_INPUT_STATUS)
        threshold = self._read_block(R_INPUT_1_THRESH, 8)
        delta = self._read_block(R_INPUT_1_DELTA, 8)
        for x in range(8):
            if (1 << x) & touched:
                status = 'none'
                _delta = self._get_twos_comp(delta[x])
                # We only ever want to detect PRESS events;
                # repeat/release behaviour is gated per-channel below.
                if _delta >= threshold[x]:
                    # Touch down event
                    if self.input_status[x] in ['press', 'held']:
                        if self.repeat_enabled & (1 << x):
                            status = 'held'
                    if self.input_status[x] in ['none', 'release']:
                        # Suppress a second 'press' while the pad stays down.
                        if self.input_pressed[x]:
                            status = 'none'
                        else:
                            status = 'press'
                else:
                    # Touch release event
                    if self.release_enabled & (1 << x) and self.input_status[x] != 'release':
                        status = 'release'
                    else:
                        status = 'none'
                self.input_status[x] = status
                self.input_pressed[x] = status in ['press', 'held', 'none']
            else:
                self.input_status[x] = 'none'
                self.input_pressed[x] = False
        return self.input_status

    def _get_twos_comp(self, val):
        """Interpret an 8-bit register value as a signed (two's complement) int."""
        if (val & (1 << (8 - 1))) != 0:
            val = val - (1 << 8)
        return val

    def clear_interrupt(self):
        """Clear the interrupt flag, bit 0, of the
        main control register."""
        main = self._read_byte(R_MAIN_CONTROL)
        main &= ~0b00000001
        self._write_byte(R_MAIN_CONTROL, main)

    def wait_for_interrupt(self, timeout=100):
        """Wait for interrupt bit 0 of the main control register to be
        set, indicating an input has been triggered.

        :param timeout: maximum time to wait, in milliseconds
        :returns: True if an interrupt fired, False on timeout
        """
        start = self._millis()
        while True:
            status = self._read_byte(R_MAIN_CONTROL)
            if status & 1:
                return True
            if self._millis() > start + timeout:
                return False
            time.sleep(0.000001)

    def on(self, channel=0, event='press', handler=None):
        """Register `handler` for `event` on `channel` and start polling."""
        self.handlers[event][channel] = handler
        self.start_watching()
        return True

    def start_watching(self):
        """Start the background poll thread. Returns False if already running."""
        if self.async_poll is None:
            self.async_poll = AsyncWorker(self._poll)
            self.async_poll.start()
            return True
        return False

    def stop_watching(self):
        """Stop the background poll thread. Returns False if not running."""
        if self.async_poll is not None:
            self.async_poll.stop()
            self.async_poll = None
            return True
        return False

    def set_touch_delta(self, delta):
        # Fallback delta threshold; the main loop reads the per-input
        # threshold registers instead.
        self._delta = delta

    def set_hold_delay(self, ms):
        """Set time before a press and hold is detected,
        Clamps to multiples of 35 from 35 to 560"""
        repeat_rate = self._calc_touch_rate(ms)
        input_config = self._read_byte(R_INPUT_CONFIG2)
        input_config = (input_config & ~0b1111) | repeat_rate
        self._write_byte(R_INPUT_CONFIG2, input_config)

    def set_repeat_rate(self, ms):
        """Set repeat rate in milliseconds,
        Clamps to multiples of 35 from 35 to 560"""
        repeat_rate = self._calc_touch_rate(ms)
        input_config = self._read_byte(R_INPUT_CONFIG)
        input_config = (input_config & ~0b1111) | repeat_rate
        self._write_byte(R_INPUT_CONFIG, input_config)

    def _calc_touch_rate(self, ms):
        """Convert milliseconds into the 4-bit register scale (35ms steps)."""
        ms = min(max(ms, 0), 560)
        # FIX: floor division keeps the result an int on Python 3; true
        # division would yield a float and break the bitwise OR in
        # set_hold_delay / set_repeat_rate.
        scale = int((round(ms / 35.0) * 35) - 35) // 35
        return scale

    def _poll(self):
        """Single polling pass, should be called in
        a loop, preferably threaded."""
        self.count += 1
        if self.wait_for_interrupt():
            inputs = self.get_input_status()
            for x in range(8):
                self._trigger_handler(x, inputs[x])
            self.clear_interrupt()

    def _trigger_handler(self, channel, event):
        # Dispatch to the registered handler for this channel/event, if any.
        if event == 'none':
            return
        if callable(self.handlers[event][channel]):
            self.handlers[event][channel](channel, event)

    def _get_product_id(self):
        """Read the chip's product ID register."""
        return self._read_byte(R_PRODUCT_ID)

    def enable_multitouch(self, en=True):
        """Toggles multi-touch by toggling the multi-touch
        block bit in the config register"""
        ret_mt = self._read_byte(R_MTOUCH_CONFIG)
        if en:
            self._write_byte(R_MTOUCH_CONFIG, ret_mt & ~0x80)
        else:
            self._write_byte(R_MTOUCH_CONFIG, ret_mt | 0x80)

    def enable_repeat(self, inputs):
        # Bitmask: a set bit enables 'held' repeat events for that input.
        self.repeat_enabled = inputs
        self._write_byte(R_REPEAT_EN, inputs)

    def enable_interrupts(self, inputs):
        # Bitmask of inputs allowed to assert the interrupt flag.
        self._write_byte(R_INTERRUPT_EN, inputs)

    def enable_inputs(self, inputs):
        # Bitmask of sensor inputs to enable.
        self._write_byte(R_INPUT_ENABLE, inputs)

    def _write_byte(self, register, value):
        self.i2c.write_byte_data(self.i2c_addr, register, value)

    def _read_byte(self, register):
        return self.i2c.read_byte_data(self.i2c_addr, register)

    def _read_block(self, register, length):
        return self.i2c.read_i2c_block_data(self.i2c_addr, register, length)

    def _millis(self):
        # Wall-clock milliseconds; used only for the interrupt timeout.
        return int(round(time.time() * 1000))

    def __del__(self):
        self.stop_watching()
class Cap1208(Cap1xxx):
    """CAP1208 variant: restricts the accepted product ID to the CAP1208."""
    supported = [PID_CAP1208]
class Cap1188(Cap1xxx):
    """CAP1188 variant (with LED drivers): accepts only the CAP1188 product ID."""
    supported = [PID_CAP1188]
|
Terae/sujet_info | refs/heads/master | write_words/sample_frozen.py | 1 | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 23 20:25:16 2017
@author: memo
demonstrates inference with frozen graph def
same as sample.py, but:
- instead of loading model + checkpoint, loads frozen graph
- instead of calling model.sample() function, uses own sample() function with named ops
"""
import numpy as np
import tensorflow as tf
import time
import os
import pickle
import argparse
from utils import *
from model import Model
import random
import svgwrite
from IPython.display import SVG, display
# main code (not in a main function since I want to run this script in IPython as well).
parser = argparse.ArgumentParser()
parser.add_argument('--filename', type=str, default='sample',
help='filename of .svg file to output, without .svg')
parser.add_argument('--sample_length', type=int, default=800,
help='number of strokes to sample')
parser.add_argument('--scale_factor', type=int, default=10,
help='factor to scale down by for svg output. smaller means bigger output')
parser.add_argument('--model_dir', type=str, default='save',
help='directory to save model to')
sample_args = parser.parse_args()
sess = tf.InteractiveSession()
# load frozen graph
# Deserialize the GraphDef from disk and merge it into the interactive
# session's graph; name='' imports the ops without a name prefix so the
# 'data_in:0' style tensor names used below resolve directly.
from tensorflow.python.platform import gfile
with gfile.FastGFile(os.path.join(sample_args.model_dir, 'graph_frz.pb'),'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    sess.graph.as_default()
    tf.import_graph_def(graph_def, name='')
def sample_stroke():
    """Sample one stroke sequence and render it to several SVG variants.

    Writes <filename>.normal/.color/.multi_color/.eos_pdf/.pdf .svg files
    and returns [strokes, params] from sample().
    """
    # don't call model.sample(), instead call sample() function defined below
    [strokes, params] = sample(sess, sample_args.sample_length)
    draw_strokes(strokes, factor=sample_args.scale_factor, svg_filename = sample_args.filename+'.normal.svg')
    draw_strokes_random_color(strokes, factor=sample_args.scale_factor, svg_filename = sample_args.filename+'.color.svg')
    draw_strokes_random_color(strokes, factor=sample_args.scale_factor, per_stroke_mode = False, svg_filename = sample_args.filename+'.multi_color.svg')
    draw_strokes_eos_weighted(strokes, params, factor=sample_args.scale_factor, svg_filename = sample_args.filename+'.eos_pdf.svg')
    draw_strokes_pdf(strokes, params, factor=sample_args.scale_factor, svg_filename = sample_args.filename+'.pdf.svg')
    return [strokes, params]
# copied straight from model.sample, but replaced all references to 'self' with named ops
def sample(sess, num=1200):
    """Autoregressively sample `num` stroke points from the frozen graph.

    Returns (strokes, mixture_params): strokes is a (num, 3) float32 array
    of (dx, dy, end_of_stroke) rows; mixture_params collects the raw
    mixture-density network outputs for every step.
    """
    # Tensor names baked into the frozen graph (graph_frz.pb).
    data_in = 'data_in:0'
    data_out_pi = 'data_out_pi:0'
    data_out_mu1 = 'data_out_mu1:0'
    data_out_mu2 = 'data_out_mu2:0'
    data_out_sigma1 = 'data_out_sigma1:0'
    data_out_sigma2 = 'data_out_sigma2:0'
    data_out_corr = 'data_out_corr:0'
    data_out_eos = 'data_out_eos:0'
    state_in = 'state_in:0'
    state_out = 'state_out:0'
    def get_pi_idx(x, pdf):
        # Inverse-CDF sampling: first mixture component whose cumulative
        # probability reaches x.
        N = pdf.size
        accumulate = 0
        for i in range(0, N):
            accumulate += pdf[i]
            if (accumulate >= x):
                return i
        print('error with sampling ensemble')
        return -1
    def sample_gaussian_2d(mu1, mu2, s1, s2, rho):
        # Draw one sample from a correlated bivariate Gaussian.
        mean = [mu1, mu2]
        cov = [[s1*s1, rho*s1*s2], [rho*s1*s2, s2*s2]]
        x = np.random.multivariate_normal(mean, cov, 1)
        return x[0][0], x[0][1]
    prev_x = np.zeros((1, 1, 3), dtype=np.float32)
    prev_x[0, 0, 2] = 1 # initially, we want to see beginning of new stroke
    # Evaluating state_in yields the network's initial recurrent state
    # (presumably zeros -- TODO confirm against the exported graph).
    prev_state = sess.run(state_in)
    strokes = np.zeros((num, 3), dtype=np.float32)
    mixture_params = []
    for i in range(num):
        feed = {data_in: prev_x, state_in:prev_state}
        [o_pi, o_mu1, o_mu2, o_sigma1, o_sigma2, o_corr, o_eos, next_state] = sess.run([data_out_pi, data_out_mu1, data_out_mu2, data_out_sigma1, data_out_sigma2, data_out_corr, data_out_eos, state_out],feed)
        idx = get_pi_idx(random.random(), o_pi[0])
        eos = 1 if random.random() < o_eos[0][0] else 0
        next_x1, next_x2 = sample_gaussian_2d(o_mu1[0][idx], o_mu2[0][idx], o_sigma1[0][idx], o_sigma2[0][idx], o_corr[0][idx])
        strokes[i,:] = [next_x1, next_x2, eos]
        params = [o_pi[0], o_mu1[0], o_mu2[0], o_sigma1[0], o_sigma2[0], o_corr[0], o_eos[0]]
        mixture_params.append(params)
        # Feed this step's sample back in as the next input.
        prev_x = np.zeros((1, 1, 3), dtype=np.float32)
        prev_x[0][0] = np.array([next_x1, next_x2, eos], dtype=np.float32)
        prev_state = next_state
    strokes[:,0:2] *= 20 #self.args.data_scale # TODO: fix mega hack hardcoding the scale
    return strokes, mixture_params
# check output
[strokes, params] = sample_stroke()
|
Jgarcia-IAS/localizacion | refs/heads/master | openerp/addons-extra/odoo-pruebas/odoo-server/addons/payment_buckaroo/tests/test_buckaroo.py | 321 | # -*- coding: utf-8 -*-
from lxml import objectify
import urlparse
import openerp
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment.tests.common import PaymentAcquirerCommon
from openerp.addons.payment_buckaroo.controllers.main import BuckarooController
from openerp.tools import mute_logger
@openerp.tests.common.at_install(False)
@openerp.tests.common.post_install(False)
class BuckarooCommon(PaymentAcquirerCommon):
    # NOTE(review): both at_install and post_install are False, so these
    # tests only run when invoked explicitly -- confirm this is intentional.
    def setUp(self):
        """Resolve the demo Buckaroo acquirer record and the instance base URL."""
        super(BuckarooCommon, self).setUp()
        cr, uid = self.cr, self.uid
        self.base_url = self.registry('ir.config_parameter').get_param(cr, uid, 'web.base.url')
        # get the buckaroo account
        model, self.buckaroo_id = self.registry('ir.model.data').get_object_reference(cr, uid, 'payment_buckaroo', 'payment_acquirer_buckaroo')
@openerp.tests.common.at_install(False)
@openerp.tests.common.post_install(False)
class BuckarooForm(BuckarooCommon):
    """Tests for rendering the Buckaroo payment form and processing its feedback."""
    def test_10_Buckaroo_form_render(self):
        """Render the acquirer button (with and without a tx) and check every form input."""
        cr, uid, context = self.cr, self.uid, {}
        # be sure not to do stupid things
        buckaroo = self.payment_acquirer.browse(self.cr, self.uid, self.buckaroo_id, None)
        self.assertEqual(buckaroo.environment, 'test', 'test without test environment')
        # ----------------------------------------
        # Test: button direct rendering
        # ----------------------------------------
        # Expected values for each <input> of the rendered form.
        form_values = {
            'add_returndata': None,
            'Brq_websitekey': buckaroo.brq_websitekey,
            'Brq_amount': '2240.0',
            'Brq_currency': 'EUR',
            'Brq_invoicenumber': 'SO004',
            'Brq_signature': '1b8c10074c622d965272a91a9e88b5b3777d2474', # update me
            'brq_test': 'True',
            'Brq_return': '%s' % urlparse.urljoin(self.base_url, BuckarooController._return_url),
            'Brq_returncancel': '%s' % urlparse.urljoin(self.base_url, BuckarooController._cancel_url),
            'Brq_returnerror': '%s' % urlparse.urljoin(self.base_url, BuckarooController._exception_url),
            'Brq_returnreject': '%s' % urlparse.urljoin(self.base_url, BuckarooController._reject_url),
            'Brq_culture': 'en-US',
        }
        # render the button
        res = self.payment_acquirer.render(
            cr, uid, self.buckaroo_id,
            'SO004', 2240.0, self.currency_euro_id,
            partner_id=None,
            partner_values=self.buyer_values,
            context=context)
        # check form result
        tree = objectify.fromstring(res)
        self.assertEqual(tree.get('action'), 'https://testcheckout.buckaroo.nl/html/', 'Buckaroo: wrong form POST url')
        for form_input in tree.input:
            if form_input.get('name') in ['submit']:
                continue
            self.assertEqual(
                form_input.get('value'),
                form_values[form_input.get('name')],
                'Buckaroo: wrong value for input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')])
            )
        # ----------------------------------------
        # Test2: button using tx + validation
        # ----------------------------------------
        # create a new draft tx
        tx_id = self.payment_transaction.create(
            cr, uid, {
                'amount': 2240.0,
                'acquirer_id': self.buckaroo_id,
                'currency_id': self.currency_euro_id,
                'reference': 'SO004',
                'partner_id': self.buyer_id,
            }, context=context
        )
        # render the button
        # NOTE(review): the reference 'should_be_erased' must be replaced by
        # the tx's own reference; the loop below checks that via form_values.
        res = self.payment_acquirer.render(
            cr, uid, self.buckaroo_id,
            'should_be_erased', 2240.0, self.currency_euro,
            tx_id=tx_id,
            partner_id=None,
            partner_values=self.buyer_values,
            context=context)
        # check form result
        tree = objectify.fromstring(res)
        self.assertEqual(tree.get('action'), 'https://testcheckout.buckaroo.nl/html/', 'Buckaroo: wrong form POST url')
        for form_input in tree.input:
            if form_input.get('name') in ['submit']:
                continue
            self.assertEqual(
                form_input.get('value'),
                form_values[form_input.get('name')],
                'Buckaroo: wrong value for form input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')])
            )
    @mute_logger('openerp.addons.payment_buckaroo.models.buckaroo', 'ValidationError')
    def test_20_buckaroo_form_management(self):
        """Feed simulated Buckaroo postbacks through form_feedback and check tx state."""
        cr, uid, context = self.cr, self.uid, {}
        # be sure not to do stupid thing
        buckaroo = self.payment_acquirer.browse(self.cr, self.uid, self.buckaroo_id, None)
        self.assertEqual(buckaroo.environment, 'test', 'test without test environment')
        # typical data posted by buckaroo after client has successfully paid
        buckaroo_post_data = {
            'BRQ_RETURNDATA': u'',
            'BRQ_AMOUNT': u'2240.00',
            'BRQ_CURRENCY': u'EUR',
            'BRQ_CUSTOMER_NAME': u'Jan de Tester',
            'BRQ_INVOICENUMBER': u'SO004',
            'BRQ_PAYMENT': u'573311D081B04069BD6336001611DBD4',
            'BRQ_PAYMENT_METHOD': u'paypal',
            'BRQ_SERVICE_PAYPAL_PAYERCOUNTRY': u'NL',
            'BRQ_SERVICE_PAYPAL_PAYEREMAIL': u'fhe@openerp.com',
            'BRQ_SERVICE_PAYPAL_PAYERFIRSTNAME': u'Jan',
            'BRQ_SERVICE_PAYPAL_PAYERLASTNAME': u'Tester',
            'BRQ_SERVICE_PAYPAL_PAYERMIDDLENAME': u'de',
            'BRQ_SERVICE_PAYPAL_PAYERSTATUS': u'verified',
            'BRQ_SIGNATURE': u'175d82dd53a02bad393fee32cb1eafa3b6fbbd91',
            'BRQ_STATUSCODE': u'190',
            'BRQ_STATUSCODE_DETAIL': u'S001',
            'BRQ_STATUSMESSAGE': u'Transaction successfully processed',
            'BRQ_TEST': u'true',
            'BRQ_TIMESTAMP': u'2014-05-08 12:41:21',
            'BRQ_TRANSACTIONS': u'D6106678E1D54EEB8093F5B3AC42EA7B',
            'BRQ_WEBSITEKEY': u'5xTGyGyPyl',
        }
        # should raise error about unknown tx
        with self.assertRaises(ValidationError):
            self.payment_transaction.form_feedback(cr, uid, buckaroo_post_data, 'buckaroo', context=context)
        tx_id = self.payment_transaction.create(
            cr, uid, {
                'amount': 2240.0,
                'acquirer_id': self.buckaroo_id,
                'currency_id': self.currency_euro_id,
                'reference': 'SO004',
                'partner_name': 'Norbert Buyer',
                'partner_country_id': self.country_france_id,
            }, context=context
        )
        # validate it
        self.payment_transaction.form_feedback(cr, uid, buckaroo_post_data, 'buckaroo', context=context)
        # check state
        tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
        self.assertEqual(tx.state, 'done', 'Buckaroo: validation did not put tx into done state')
        self.assertEqual(tx.buckaroo_txnid, buckaroo_post_data.get('BRQ_TRANSACTIONS'), 'Buckaroo: validation did not update tx payid')
        # reset tx
        tx.write({'state': 'draft', 'date_validate': False, 'buckaroo_txnid': False})
        # now buckaroo post is ok: try to modify the SHASIGN
        buckaroo_post_data['BRQ_SIGNATURE'] = '54d928810e343acf5fb0c3ee75fd747ff159ef7a'
        with self.assertRaises(ValidationError):
            self.payment_transaction.form_feedback(cr, uid, buckaroo_post_data, 'buckaroo', context=context)
        # simulate an error
        buckaroo_post_data['BRQ_STATUSCODE'] = 2
        buckaroo_post_data['BRQ_SIGNATURE'] = '4164b52adb1e6a2221d3d8a39d8c3e18a9ecb90b'
        self.payment_transaction.form_feedback(cr, uid, buckaroo_post_data, 'buckaroo', context=context)
        # check state
        tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
        self.assertEqual(tx.state, 'error', 'Buckaroo: erroneous validation did not put tx into error state')
|
skywin/p2pool | refs/heads/master | SOAPpy/URLopener.py | 294 | """Provide a class for loading data from URL's that handles basic
authentication"""
ident = '$Id: URLopener.py 541 2004-01-31 04:20:06Z warnes $'
from version import __version__
from Config import Config
from urllib import FancyURLopener
class URLopener(FancyURLopener):
    """URL opener that answers HTTP basic-auth prompts with fixed credentials."""
    username = None
    passwd = None
    def __init__(self, username=None, passwd=None, *args, **kw):
        # Direct base-class call (FancyURLopener is an old-style Python 2
        # class, so super() is not usable here).
        FancyURLopener.__init__( self, *args, **kw)
        self.username = username
        self.passwd = passwd
    def prompt_user_passwd(self, host, realm):
        # Called by FancyURLopener on a 401 challenge; return the stored
        # credentials instead of prompting interactively.
        return self.username, self.passwd
|
chengjf/database-interface-doc-management | refs/heads/master | flask-demo/flask/Lib/collections/__main__.py | 213 | ################################################################################
### Simple tests
################################################################################
# verify that instances can be pickled
from collections import namedtuple
from pickle import loads, dumps
Point = namedtuple('Point', 'x, y', True)
p = Point(x=10, y=20)
assert p == loads(dumps(p))
# test and demonstrate ability to override methods
class Point(namedtuple('Point', 'x y')):
    """Namedtuple subclass demonstrating how to add derived behaviour."""
    __slots__ = ()

    @property
    def hypot(self):
        # Euclidean distance of the point from the origin.
        squared_norm = self.x ** 2 + self.y ** 2
        return squared_norm ** 0.5

    def __str__(self):
        return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot)
for p in Point(3, 4), Point(14, 5/7.):
print (p)
class Point(namedtuple('Point', 'x y')):
    'Point class with optimized _make() and _replace() without error-checking'
    __slots__ = ()
    # Bind tuple.__new__ directly: skips the length/field validation that the
    # generated _make performs, so malformed iterables are NOT detected.
    _make = classmethod(tuple.__new__)
    def _replace(self, _map=map, **kwds):
        # _map is bound at definition time as a micro-optimization (local
        # lookup instead of a global lookup on every call).
        return self._make(_map(kwds.get, ('x', 'y'), self))
print(Point(11, 22)._replace(x=100))
# Extend an existing namedtuple by concatenating its _fields.
Point3D = namedtuple('Point3D', Point._fields + ('z',))
print(Point3D.__doc__)
# Run the doctests embedded in the collections module and report totals.
import doctest, collections
TestResults = namedtuple('TestResults', 'failed attempted')
print(TestResults(*doctest.testmod(collections)))
|
damdam-s/purchase-workflow | refs/heads/8.0 | __unported__/product_by_supplier/__init__.py | 15 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2010-2013 Elico Corp. All Rights Reserved.
# Author: Yannick Gouin <yannick.gouin@elico-corp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import product
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Bitl/RBXLegacy-src | refs/heads/stable | Cut/RBXLegacyDiscordBot/lib/youtube_dl/extractor/restudy.py | 62 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class RestudyIE(InfoExtractor):
    # Extractor for restudy.dk video pages; formats come from a SMIL manifest.
    _VALID_URL = r'https?://(?:www\.)?restudy\.dk/video/play/id/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'https://www.restudy.dk/video/play/id/1637',
        'info_dict': {
            'id': '1637',
            'ext': 'flv',
            'title': 'Leiden-frosteffekt',
            'description': 'Denne video er et eksperiment med flydende kvælstof.',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Title/description come from the page's OpenGraph metadata.
        title = self._og_search_title(webpage).strip()
        description = self._og_search_description(webpage).strip()
        # The SMIL manifest lists the available (rtmp) formats for this id.
        formats = self._extract_smil_formats(
            'https://www.restudy.dk/awsmedia/SmilDirectory/video_%s.xml' % video_id,
            video_id)
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'formats': formats,
        }
|
akosyakov/intellij-community | refs/heads/master | python/testData/inspections/PyUnresolvedReferencesInspection/StubsOfNestedClasses/b.py | 79 | from c import Class1
class Class2(Class1):
    """Test fixture: subclasses an imported class and its nested class."""
    class SubClass2(Class1.SubClass1):
        def __init__(self, foo):
            # Delegate to the nested base-class initializer explicitly.
            Class1.SubClass1.__init__(self, foo)
|
SirEdvin/Pandas-Pipe | refs/heads/master | tests/converter_test.py | 1 | import pandas as pd
from base import converter_test, ConstantDataSource
def test_simple_converter():
    """End-to-end pipeline check: the converter under test doubles column
    't1' while leaving 't2' untouched."""
    # Input frame fed through a constant data source.
    df = pd.DataFrame(
        {
            't1': [1, 2, 3],
            't2': [-1, -2, -3]
        }
    )
    # Expected output: 't1' doubled, 't2' unchanged; columns sorted so the
    # comparison is order-independent.
    result_df = pd.DataFrame(
        {
            't1': [2, 4, 6],
            't2': [-1, -2, -3]
        }
    ).sort_index(axis=1)
    converter_test.append(ConstantDataSource, construct_arguments=[df])
    output_df = converter_test.process(output_channels=['root']).sort_index(axis=1)
    assert output_df.equals(result_df)
|
thaumos/ansible | refs/heads/devel | lib/ansible/modules/network/netscaler/netscaler_service.py | 36 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netscaler_service
short_description: Manage service configuration in Netscaler
description:
- Manage service configuration in Netscaler.
- This module allows the creation, deletion and modification of Netscaler services.
- This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance.
- This module supports check mode.
version_added: "2.4.0"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
name:
description:
- >-
Name for the service. Must begin with an ASCII alphabetic or underscore C(_) character, and must
contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space C( ), colon C(:), at C(@), equals
C(=), and hyphen C(-) characters. Cannot be changed after the service has been created.
- "Minimum length = 1"
ip:
description:
- "IP to assign to the service."
- "Minimum length = 1"
servername:
description:
- "Name of the server that hosts the service."
- "Minimum length = 1"
servicetype:
choices:
- 'HTTP'
- 'FTP'
- 'TCP'
- 'UDP'
- 'SSL'
- 'SSL_BRIDGE'
- 'SSL_TCP'
- 'DTLS'
- 'NNTP'
- 'RPCSVR'
- 'DNS'
- 'ADNS'
- 'SNMP'
- 'RTSP'
- 'DHCPRA'
- 'ANY'
- 'SIP_UDP'
- 'SIP_TCP'
- 'SIP_SSL'
- 'DNS_TCP'
- 'ADNS_TCP'
- 'MYSQL'
- 'MSSQL'
- 'ORACLE'
- 'RADIUS'
- 'RADIUSListener'
- 'RDP'
- 'DIAMETER'
- 'SSL_DIAMETER'
- 'TFTP'
- 'SMPP'
- 'PPTP'
- 'GRE'
- 'SYSLOGTCP'
- 'SYSLOGUDP'
- 'FIX'
- 'SSL_FIX'
description:
- "Protocol in which data is exchanged with the service."
port:
description:
- "Port number of the service."
- "Range 1 - 65535"
- "* in CLI is represented as 65535 in NITRO API"
cleartextport:
description:
- >-
Port to which clear text data must be sent after the appliance decrypts incoming SSL traffic.
Applicable to transparent SSL services.
- "Minimum value = 1"
cachetype:
choices:
- 'TRANSPARENT'
- 'REVERSE'
- 'FORWARD'
description:
- "Cache type supported by the cache server."
maxclient:
description:
- "Maximum number of simultaneous open connections to the service."
- "Minimum value = 0"
- "Maximum value = 4294967294"
healthmonitor:
description:
- "Monitor the health of this service"
default: yes
type: bool
maxreq:
description:
- "Maximum number of requests that can be sent on a persistent connection to the service."
- "Note: Connection requests beyond this value are rejected."
- "Minimum value = 0"
- "Maximum value = 65535"
cacheable:
description:
- "Use the transparent cache redirection virtual server to forward requests to the cache server."
- "Note: Do not specify this parameter if you set the Cache Type parameter."
default: no
type: bool
cip:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Before forwarding a request to the service, insert an HTTP header with the client's IPv4 or IPv6
address as its value. Used if the server needs the client's IP address for security, accounting, or
other purposes, and setting the Use Source IP parameter is not a viable option.
cipheader:
description:
- >-
Name for the HTTP header whose value must be set to the IP address of the client. Used with the
Client IP parameter. If you set the Client IP parameter, and you do not specify a name for the
header, the appliance uses the header name specified for the global Client IP Header parameter (the
cipHeader parameter in the set ns param CLI command or the Client IP Header parameter in the
Configure HTTP Parameters dialog box at System > Settings > Change HTTP parameters). If the global
Client IP Header parameter is not specified, the appliance inserts a header with the name
"client-ip.".
- "Minimum length = 1"
usip:
description:
- >-
Use the client's IP address as the source IP address when initiating a connection to the server. When
creating a service, if you do not set this parameter, the service inherits the global Use Source IP
setting (available in the enable ns mode and disable ns mode CLI commands, or in the System >
Settings > Configure modes > Configure Modes dialog box). However, you can override this setting
after you create the service.
type: bool
pathmonitor:
description:
- "Path monitoring for clustering."
pathmonitorindv:
description:
- "Individual Path monitoring decisions."
useproxyport:
description:
- >-
Use the proxy port as the source port when initiating connections with the server. With the NO
setting, the client-side connection port is used as the source port for the server-side connection.
- "Note: This parameter is available only when the Use Source IP (USIP) parameter is set to YES."
type: bool
sp:
description:
- "Enable surge protection for the service."
type: bool
rtspsessionidremap:
description:
- "Enable RTSP session ID mapping for the service."
default: off
type: bool
clttimeout:
description:
- "Time, in seconds, after which to terminate an idle client connection."
- "Minimum value = 0"
- "Maximum value = 31536000"
svrtimeout:
description:
- "Time, in seconds, after which to terminate an idle server connection."
- "Minimum value = 0"
- "Maximum value = 31536000"
customserverid:
description:
- >-
Unique identifier for the service. Used when the persistency type for the virtual server is set to
Custom Server ID.
default: 'None'
serverid:
description:
- "The identifier for the service. This is used when the persistency type is set to Custom Server ID."
cka:
description:
- "Enable client keep-alive for the service."
type: bool
tcpb:
description:
- "Enable TCP buffering for the service."
type: bool
cmp:
description:
- "Enable compression for the service."
type: bool
maxbandwidth:
description:
- "Maximum bandwidth, in Kbps, allocated to the service."
- "Minimum value = 0"
- "Maximum value = 4294967287"
accessdown:
description:
- >-
Use Layer 2 mode to bridge the packets sent to this service if it is marked as DOWN. If the service
is DOWN, and this parameter is disabled, the packets are dropped.
default: no
type: bool
monthreshold:
description:
- >-
Minimum sum of weights of the monitors that are bound to this service. Used to determine whether to
mark a service as UP or DOWN.
- "Minimum value = 0"
- "Maximum value = 65535"
downstateflush:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Flush all active transactions associated with a service whose state transitions from UP to DOWN. Do
not enable this option for applications that must complete their transactions.
tcpprofilename:
description:
- "Name of the TCP profile that contains TCP configuration settings for the service."
- "Minimum length = 1"
- "Maximum length = 127"
httpprofilename:
description:
- "Name of the HTTP profile that contains HTTP configuration settings for the service."
- "Minimum length = 1"
- "Maximum length = 127"
hashid:
description:
- >-
A numerical identifier that can be used by hash based load balancing methods. Must be unique for each
service.
- "Minimum value = 1"
comment:
description:
- "Any information about the service."
appflowlog:
choices:
- 'enabled'
- 'disabled'
description:
- "Enable logging of AppFlow information."
netprofile:
description:
- "Network profile to use for the service."
- "Minimum length = 1"
- "Maximum length = 127"
td:
description:
- >-
Integer value that uniquely identifies the traffic domain in which you want to configure the entity.
If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID
of 0.
- "Minimum value = 0"
- "Maximum value = 4094"
processlocal:
choices:
- 'enabled'
- 'disabled'
description:
- >-
By turning on this option packets destined to a service in a cluster will not under go any steering.
Turn this option for single packet request response mode or when the upstream device is performing a
proper RSS for connection based distribution.
dnsprofilename:
description:
- >-
Name of the DNS profile to be associated with the service. DNS profile properties will applied to the
transactions processed by a service. This parameter is valid only for ADNS and ADNS-TCP services.
- "Minimum length = 1"
- "Maximum length = 127"
ipaddress:
description:
- "The new IP address of the service."
graceful:
description:
- >-
Shut down gracefully, not accepting any new connections, and disabling the service when all of its
connections are closed.
default: no
type: bool
monitor_bindings:
description:
- A list of load balancing monitors to bind to this service.
- Each monitor entry is a dictionary which may contain the following options.
- Note that if not using the built in monitors they must first be setup.
suboptions:
monitorname:
description:
- Name of the monitor.
weight:
description:
- Weight to assign to the binding between the monitor and service.
dup_state:
choices:
- 'enabled'
- 'disabled'
description:
- State of the monitor.
- The state setting for a monitor of a given type affects all monitors of that type.
- For example, if an HTTP monitor is enabled, all HTTP monitors on the appliance are (or remain) enabled.
- If an HTTP monitor is disabled, all HTTP monitors on the appliance are disabled.
dup_weight:
description:
- Weight to assign to the binding between the monitor and service.
disabled:
description:
- When set to C(yes) the service state will be set to DISABLED.
- When set to C(no) the service state will be set to ENABLED.
- >-
Note that due to limitations of the underlying NITRO API a C(disabled) state change alone
does not cause the module result to report a changed status.
type: bool
default: false
extends_documentation_fragment: netscaler
requirements:
- nitro python sdk
'''
EXAMPLES = '''
# Monitor monitor-1 must have been already setup
- name: Setup http service
gather_facts: False
delegate_to: localhost
netscaler_service:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
state: present
name: service-http-1
servicetype: HTTP
ipaddress: 10.78.0.1
port: 80
monitor_bindings:
- monitor-1
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: "['message 1', 'message 2']"
diff:
description: A dictionary with a list of differences between the actual configured object and the configuration specified in the module
returned: failure
type: dict
sample: "{ 'clttimeout': 'difference. ours: (float) 10.0 other: (float) 20.0' }"
'''
import copy
try:
from nssrc.com.citrix.netscaler.nitro.resource.config.basic.service import service
from nssrc.com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding import service_lbmonitor_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_service_binding import lbmonitor_service_binding
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
PYTHON_SDK_IMPORTED = True
except ImportError as e:
PYTHON_SDK_IMPORTED = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netscaler.netscaler import (ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines,
get_immutables_intersection)
def service_exists(client, module):
    """Return True when a service with the configured name exists on the node."""
    filter_expression = 'name:%s' % module.params['name']
    return service.count_filtered(client, filter_expression) > 0
def service_identical(client, module, service_proxy):
    """Return True when the configured service matches the service on the node."""
    retrieved = service.get_filtered(client, 'name:%s' % module.params['name'])[0]
    differences = service_proxy.diff_object(retrieved)
    # The retrieved object stores the actual IP address in its 'ipaddress'
    # attribute, so an 'ip' entry in the diff is a false positive.
    differences.pop('ip', None)
    return not differences
def diff(client, module, service_proxy):
    """Return a dict of attribute differences between the configured and actual service."""
    retrieved = service.get_filtered(client, 'name:%s' % module.params['name'])[0]
    delta = service_proxy.diff_object(retrieved)
    # 'ipaddress' is the authoritative address attribute on the retrieved
    # object; drop the spurious 'ip' entry.
    delta.pop('ip', None)
    return delta
def get_configured_monitor_bindings(client, module, monitor_bindings_rw_attrs):
    """Build ConfigProxy objects for the monitor bindings declared in the
    playbook, keyed by monitor name.

    Returns an empty dict when the 'monitor_bindings' parameter is absent.
    Each entry of 'monitor_bindings' is expected to be a dict of binding
    attributes (it is deep-copied and augmented below).
    """
    bindings = {}
    if module.params['monitor_bindings'] is not None:
        for binding in module.params['monitor_bindings']:
            # Copy so the user's module params are never mutated.
            attribute_values_dict = copy.deepcopy(binding)
            # attribute_values_dict['servicename'] = module.params['name']
            # NOTE(review): 'servicegroupname' (not 'servicename') is set here
            # even though this is a service binding; monitor_bindings_identical()
            # compensates by treating servicegroupname == servicename as equal.
            # Presumably a workaround for the NITRO binding schema — confirm
            # before changing.
            attribute_values_dict['servicegroupname'] = module.params['name']
            binding_proxy = ConfigProxy(
                actual=lbmonitor_service_binding(),
                client=client,
                attribute_values_dict=attribute_values_dict,
                readwrite_attrs=monitor_bindings_rw_attrs,
            )
            key = binding_proxy.monitorname
            bindings[key] = binding_proxy
    return bindings
def get_actual_monitor_bindings(client, module):
    """Fetch the monitor bindings currently present on the node, keyed by monitor name."""
    actual_bindings = {}
    if service_lbmonitor_binding.count(client, module.params['name']) == 0:
        return actual_bindings
    for existing in service_lbmonitor_binding.get(client, module.params['name']):
        # The built-in default monitors cannot be operated on, so skip them.
        if existing.monitor_name in ('tcp-default', 'ping-default'):
            continue
        binding = lbmonitor_service_binding()
        binding.weight = existing.weight
        binding.monitorname = existing.monitor_name
        binding.dup_weight = existing.dup_weight
        binding.servicename = module.params['name']
        actual_bindings[existing.monitor_name] = binding
    return actual_bindings
def monitor_bindings_identical(client, module, monitor_bindings_rw_attrs):
    """Return True when the configured monitor bindings match the ones on the node.

    Compares the binding key sets first, then the attributes of each
    binding present on both sides.
    """
    configured_proxys = get_configured_monitor_bindings(client, module, monitor_bindings_rw_attrs)
    actual_bindings = get_actual_monitor_bindings(client, module)
    configured_key_set = set(configured_proxys.keys())
    actual_key_set = set(actual_bindings.keys())
    # Any monitor present on only one side means the sets differ.
    symmetrical_diff = configured_key_set ^ actual_key_set
    if len(symmetrical_diff) > 0:
        return False
    # Compare key to key
    for monitor_name in configured_key_set:
        proxy = configured_proxys[monitor_name]
        actual = actual_bindings[monitor_name]
        diff_dict = proxy.diff_object(actual)
        # get_configured_monitor_bindings() stores the service name under
        # 'servicegroupname' while the actual binding uses 'servicename';
        # ignore that difference when the values agree.
        if 'servicegroupname' in diff_dict:
            if proxy.servicegroupname == actual.servicename:
                del diff_dict['servicegroupname']
        if len(diff_dict) > 0:
            return False
    # Fallthrough to success
    return True
def sync_monitor_bindings(client, module, monitor_bindings_rw_attrs):
    """Make the node's monitor bindings match the configured ones.

    Deletes bindings absent from the configuration, deletes and re-adds
    bindings whose attributes differ, and adds bindings that are missing.
    """
    configured_proxys = get_configured_monitor_bindings(client, module, monitor_bindings_rw_attrs)
    actual_bindings = get_actual_monitor_bindings(client, module)
    configured_keyset = set(configured_proxys.keys())
    actual_keyset = set(actual_bindings.keys())
    # Delete extra
    delete_keys = list(actual_keyset - configured_keyset)
    for monitor_name in delete_keys:
        log('Deleting binding for monitor %s' % monitor_name)
        lbmonitor_service_binding.delete(client, actual_bindings[monitor_name])
    # Delete and re-add modified
    common_keyset = list(configured_keyset & actual_keyset)
    for monitor_name in common_keyset:
        proxy = configured_proxys[monitor_name]
        actual = actual_bindings[monitor_name]
        if not proxy.has_equal_attributes(actual):
            log('Deleting and re adding binding for monitor %s' % monitor_name)
            # Bindings cannot be updated in place; delete then add.
            lbmonitor_service_binding.delete(client, actual)
            proxy.add()
    # Add new
    new_keys = list(configured_keyset - actual_keyset)
    for monitor_name in new_keys:
        log('Adding binding for monitor %s' % monitor_name)
        configured_proxys[monitor_name].add()
def all_identical(client, module, service_proxy, monitor_bindings_rw_attrs):
    """Return True only when both the service and its monitor bindings match the node."""
    # Preserve the short-circuit: bindings are only fetched when the
    # service itself already matches.
    if not service_identical(client, module, service_proxy):
        return False
    return monitor_bindings_identical(client, module, monitor_bindings_rw_attrs)
def do_state_change(client, module, service_proxy):
    """Enable or disable the service per the 'disabled' parameter; return the NITRO result."""
    if module.params['disabled']:
        log('Disabling service')
        return service.disable(client, service_proxy.actual)
    log('Enabling service')
    return service.enable(client, service_proxy.actual)
def main():
    """Entry point: build the argument spec, log in to the NetScaler node,
    and converge the service (and its monitor bindings) to the requested
    state ('present' or 'absent'), reporting changed/failed via Ansible.
    """
    # Arguments mirroring the NITRO service resource attributes.
    module_specific_arguments = dict(
        name=dict(type='str'),
        ip=dict(type='str'),
        servername=dict(type='str'),
        servicetype=dict(
            type='str',
            choices=[
                'HTTP',
                'FTP',
                'TCP',
                'UDP',
                'SSL',
                'SSL_BRIDGE',
                'SSL_TCP',
                'DTLS',
                'NNTP',
                'RPCSVR',
                'DNS',
                'ADNS',
                'SNMP',
                'RTSP',
                'DHCPRA',
                'ANY',
                'SIP_UDP',
                'SIP_TCP',
                'SIP_SSL',
                'DNS_TCP',
                'ADNS_TCP',
                'MYSQL',
                'MSSQL',
                'ORACLE',
                'RADIUS',
                'RADIUSListener',
                'RDP',
                'DIAMETER',
                'SSL_DIAMETER',
                'TFTP',
                'SMPP',
                'PPTP',
                'GRE',
                'SYSLOGTCP',
                'SYSLOGUDP',
                'FIX',
                'SSL_FIX'
            ]
        ),
        port=dict(type='int'),
        cleartextport=dict(type='int'),
        cachetype=dict(
            type='str',
            choices=[
                'TRANSPARENT',
                'REVERSE',
                'FORWARD',
            ]
        ),
        maxclient=dict(type='float'),
        healthmonitor=dict(
            type='bool',
            default=True,
        ),
        maxreq=dict(type='float'),
        cacheable=dict(
            type='bool',
            default=False,
        ),
        cip=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        cipheader=dict(type='str'),
        usip=dict(type='bool'),
        useproxyport=dict(type='bool'),
        sp=dict(type='bool'),
        rtspsessionidremap=dict(
            type='bool',
            default=False,
        ),
        clttimeout=dict(type='float'),
        svrtimeout=dict(type='float'),
        customserverid=dict(
            type='str',
            default='None',
        ),
        cka=dict(type='bool'),
        tcpb=dict(type='bool'),
        cmp=dict(type='bool'),
        maxbandwidth=dict(type='float'),
        accessdown=dict(
            type='bool',
            default=False
        ),
        monthreshold=dict(type='float'),
        downstateflush=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ],
        ),
        tcpprofilename=dict(type='str'),
        httpprofilename=dict(type='str'),
        hashid=dict(type='float'),
        comment=dict(type='str'),
        appflowlog=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ],
        ),
        netprofile=dict(type='str'),
        processlocal=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ],
        ),
        dnsprofilename=dict(type='str'),
        ipaddress=dict(type='str'),
        graceful=dict(
            type='bool',
            default=False,
        ),
    )
    # Arguments handled by this module itself rather than passed to NITRO.
    hand_inserted_arguments = dict(
        monitor_bindings=dict(type='list'),
        disabled=dict(
            type='bool',
            default=False,
        ),
    )
    argument_spec = dict()
    argument_spec.update(netscaler_common_arguments)
    argument_spec.update(module_specific_arguments)
    argument_spec.update(hand_inserted_arguments)
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    module_result = dict(
        changed=False,
        failed=False,
        loglines=loglines,
    )
    # Fail the module if imports failed
    if not PYTHON_SDK_IMPORTED:
        module.fail_json(msg='Could not load nitro python sdk')
    client = get_nitro_client(module)
    try:
        client.login()
    except nitro_exception as e:
        msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
        module.fail_json(msg=msg)
    except Exception as e:
        # String comparison on the type avoids importing requests directly.
        if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
            module.fail_json(msg='Connection error %s' % str(e))
        elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
            module.fail_json(msg='SSL Error %s' % str(e))
        else:
            module.fail_json(msg='Unexpected error during login %s' % str(e))
    # Fallthrough to rest of execution
    # Instantiate Service Config object
    readwrite_attrs = [
        'name',
        'ip',
        'servername',
        'servicetype',
        'port',
        'cleartextport',
        'cachetype',
        'maxclient',
        'healthmonitor',
        'maxreq',
        'cacheable',
        'cip',
        'cipheader',
        'usip',
        'useproxyport',
        'sp',
        'rtspsessionidremap',
        'clttimeout',
        'svrtimeout',
        'customserverid',
        'cka',
        'tcpb',
        'cmp',
        'maxbandwidth',
        'accessdown',
        'monthreshold',
        'downstateflush',
        'tcpprofilename',
        'httpprofilename',
        'hashid',
        'comment',
        'appflowlog',
        'netprofile',
        'processlocal',
        'dnsprofilename',
        'ipaddress',
        'graceful',
    ]
    readonly_attrs = [
        'numofconnections',
        'policyname',
        'serviceconftype',
        'serviceconftype2',
        'value',
        'gslb',
        'dup_state',
        'publicip',
        'publicport',
        'svrstate',
        'monitor_state',
        'monstatcode',
        'lastresponse',
        'responsetime',
        'riseapbrstatsmsgcode2',
        'monstatparam1',
        'monstatparam2',
        'monstatparam3',
        'statechangetimesec',
        'statechangetimemsec',
        'tickssincelaststatechange',
        'stateupdatereason',
        'clmonowner',
        'clmonview',
        'serviceipstr',
        'oracleserverversion',
    ]
    # Attributes NITRO does not allow updating in place; a change to any
    # of these requires delete + recreate and fails the module below.
    immutable_attrs = [
        'name',
        'ip',
        'servername',
        'servicetype',
        'port',
        'cleartextport',
        'cachetype',
        'cipheader',
        'serverid',
        'state',
        'td',
        'monitor_name_svc',
        'riseapbrstatsmsgcode',
        'graceful',
        'all',
        'Internal',
        'newname',
    ]
    # Conversions from Ansible-typed values to the strings NITRO expects.
    transforms = {
        'pathmonitorindv': ['bool_yes_no'],
        'cacheable': ['bool_yes_no'],
        'cka': ['bool_yes_no'],
        'pathmonitor': ['bool_yes_no'],
        'tcpb': ['bool_yes_no'],
        'sp': ['bool_on_off'],
        'graceful': ['bool_yes_no'],
        'usip': ['bool_yes_no'],
        'healthmonitor': ['bool_yes_no'],
        'useproxyport': ['bool_yes_no'],
        'rtspsessionidremap': ['bool_on_off'],
        'accessdown': ['bool_yes_no'],
        'cmp': ['bool_yes_no'],
        'cip': [lambda v: v.upper()],
        'downstateflush': [lambda v: v.upper()],
        'appflowlog': [lambda v: v.upper()],
        'processlocal': [lambda v: v.upper()],
    }
    monitor_bindings_rw_attrs = [
        'servicename',
        'servicegroupname',
        'dup_state',
        'dup_weight',
        'monitorname',
        'weight',
    ]
    # Translate module arguments to corresponding config object attributes
    if module.params['ip'] is None:
        module.params['ip'] = module.params['ipaddress']
    service_proxy = ConfigProxy(
        actual=service(),
        client=client,
        attribute_values_dict=module.params,
        readwrite_attrs=readwrite_attrs,
        readonly_attrs=readonly_attrs,
        immutable_attrs=immutable_attrs,
        transforms=transforms,
    )
    try:
        # Apply appropriate state
        if module.params['state'] == 'present':
            log('Applying actions for state present')
            if not service_exists(client, module):
                if not module.check_mode:
                    service_proxy.add()
                    sync_monitor_bindings(client, module, monitor_bindings_rw_attrs)
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            elif not all_identical(client, module, service_proxy, monitor_bindings_rw_attrs):
                # Check if we try to change value of immutable attributes
                diff_dict = diff(client, module, service_proxy)
                immutables_changed = get_immutables_intersection(service_proxy, diff_dict.keys())
                if immutables_changed != []:
                    msg = 'Cannot update immutable attributes %s. Must delete and recreate entity.' % (immutables_changed,)
                    module.fail_json(msg=msg, diff=diff_dict, **module_result)
                # Service sync
                if not service_identical(client, module, service_proxy):
                    if not module.check_mode:
                        service_proxy.update()
                # Monitor bindings sync
                if not monitor_bindings_identical(client, module, monitor_bindings_rw_attrs):
                    if not module.check_mode:
                        sync_monitor_bindings(client, module, monitor_bindings_rw_attrs)
                module_result['changed'] = True
                if not module.check_mode:
                    if module.params['save_config']:
                        client.save_config()
            else:
                module_result['changed'] = False
            if not module.check_mode:
                res = do_state_change(client, module, service_proxy)
                if res.errorcode != 0:
                    msg = 'Error when setting disabled state. errorcode: %s message: %s' % (res.errorcode, res.message)
                    module.fail_json(msg=msg, **module_result)
            # Sanity check for state
            if not module.check_mode:
                log('Sanity checks for state present')
                if not service_exists(client, module):
                    module.fail_json(msg='Service does not exist', **module_result)
                if not service_identical(client, module, service_proxy):
                    module.fail_json(msg='Service differs from configured', diff=diff(client, module, service_proxy), **module_result)
                if not monitor_bindings_identical(client, module, monitor_bindings_rw_attrs):
                    module.fail_json(msg='Monitor bindings are not identical', **module_result)
        elif module.params['state'] == 'absent':
            log('Applying actions for state absent')
            if service_exists(client, module):
                if not module.check_mode:
                    service_proxy.delete()
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            else:
                module_result['changed'] = False
            # Sanity check for state
            if not module.check_mode:
                log('Sanity checks for state absent')
                if service_exists(client, module):
                    module.fail_json(msg='Service still exists', **module_result)
    except nitro_exception as e:
        msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message)
        module.fail_json(msg=msg, **module_result)
    client.logout()
    module.exit_json(**module_result)
if __name__ == "__main__":
main()
|
google/contentbox | refs/heads/master | third_party/django/core/mail/backends/locmem.py | 227 | """
Backend for test environment.
"""
from django.core import mail
from django.core.mail.backends.base import BaseEmailBackend
class EmailBackend(BaseEmailBackend):
    """An email backend for use during test sessions.

    Instead of sending messages over the wire, this backend collects them
    in a dummy outbox, reachable as the module-level ``mail.outbox`` list.
    """
    def __init__(self, *args, **kwargs):
        super(EmailBackend, self).__init__(*args, **kwargs)
        # Lazily create the shared outbox the first time a backend is built.
        mail.outbox = getattr(mail, 'outbox', [])

    def send_messages(self, messages):
        """Store the messages in the dummy outbox and return how many there were."""
        for msg in messages:
            # Rendering the message triggers header validation.
            msg.message()
        mail.outbox.extend(messages)
        return len(messages)
|
drexly/tonginBlobStore | refs/heads/master | lib/django/db/models/fields/related.py | 25 | from __future__ import unicode_literals
import warnings
from functools import partial
from django import forms
from django.apps import apps
from django.core import checks, exceptions
from django.db import connection, router
from django.db.backends import utils
from django.db.models.deletion import CASCADE, SET_DEFAULT, SET_NULL
from django.db.models.query_utils import PathInfo
from django.db.models.utils import make_model_tuple
from django.utils import six
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango110Warning,
)
from django.utils.encoding import force_text, smart_text
from django.utils.functional import cached_property, curry
from django.utils.translation import ugettext_lazy as _
from django.utils.version import get_docs_version
from . import (
AutoField, Field, IntegerField, PositiveIntegerField,
PositiveSmallIntegerField,
)
from .related_descriptors import (
ForwardManyToOneDescriptor, ManyToManyDescriptor,
ReverseManyToOneDescriptor, ReverseOneToOneDescriptor,
)
from .related_lookups import (
RelatedExact, RelatedGreaterThan, RelatedGreaterThanOrEqual, RelatedIn,
RelatedLessThan, RelatedLessThanOrEqual,
)
from .reverse_related import (
ForeignObjectRel, ManyToManyRel, ManyToOneRel, OneToOneRel,
)
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
def resolve_relation(scope_model, relation):
    """
    Normalize *relation* into a model class or a fully-qualified
    "app_label.ModelName" string, resolved relative to *scope_model*.

    Accepted forms for *relation*:
    * RECURSIVE_RELATIONSHIP_CONSTANT ("self"): resolves to *scope_model*.
    * A bare model name: *scope_model*'s app_label is prepended.
    * An "app_label.ModelName" string: returned unchanged.
    * A model class: returned unchanged.
    """
    # "self" always points back at the model declaring the field.
    if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
        return scope_model
    # Qualify a bare model name with the declaring model's app label.
    if isinstance(relation, six.string_types) and "." not in relation:
        return "%s.%s" % (scope_model._meta.app_label, relation)
    return relation
def lazy_related_operation(function, model, *related_models, **kwargs):
    """
    Schedule `function` to run once `model` and every entry of
    `related_models` have been imported and registered with the app
    registry; the loaded model classes are passed to `function` as
    positional arguments, followed by any keyword arguments given here.

    `model` must be a model class. Each related model may be a class or
    any reference accepted by `resolve_relation()`, resolved relative to
    `model`. This is a convenience wrapper for `Apps.lazy_model_operation`;
    the registry used is `model._meta.apps`.
    """
    registry = model._meta.apps
    resolved = [model]
    for rel in related_models:
        resolved.append(resolve_relation(model, rel))
    keys = [make_model_tuple(m) for m in resolved]
    return registry.lazy_model_operation(partial(function, **kwargs), *keys)
def add_lazy_relation(cls, field, relation, operation):
    """
    Deprecated forerunner of lazy_related_operation(); kept only for
    backward compatibility. Emits RemovedInDjango20Warning and delegates.

    The legacy callback signature was operation(field, related, local),
    so the arguments are reordered before delegating.
    """
    warnings.warn(
        "add_lazy_relation() has been superseded by lazy_related_operation() "
        "and related methods on the Apps class.",
        RemovedInDjango20Warning, stacklevel=2)

    # Rearrange args for new Apps.lazy_model_operation. Use a named
    # function instead of a lambda assignment (PEP 8 E731).
    def function(local, related, field):
        return operation(field, related, local)

    lazy_related_operation(function, cls, relation, field=field)
class RelatedField(Field):
    """
    Base class that all relational fields inherit from.
    """
    # Field flags
    one_to_many = False
    one_to_one = False
    many_to_many = False
    many_to_one = False
    @cached_property
    def related_model(self):
        # Can't cache this property until all the models are loaded.
        apps.check_models_ready()
        return self.remote_field.model
    def check(self, **kwargs):
        # Run the base Field checks plus the relation-specific checks below.
        errors = super(RelatedField, self).check(**kwargs)
        errors.extend(self._check_related_name_is_valid())
        errors.extend(self._check_relation_model_exists())
        errors.extend(self._check_referencing_to_swapped_model())
        errors.extend(self._check_clashes())
        return errors
    def _check_related_name_is_valid(self):
        # fields.E306: related_name must be a valid Python identifier or
        # end with '+' (which suppresses the reverse accessor).
        import re
        import keyword
        related_name = self.remote_field.related_name
        if related_name is None:
            return []
        is_valid_id = True
        if keyword.iskeyword(related_name):
            is_valid_id = False
        if six.PY3:
            if not related_name.isidentifier():
                is_valid_id = False
        else:
            # Python 2 has no str.isidentifier(); approximate with a regex.
            if not re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*\Z', related_name):
                is_valid_id = False
        if not (is_valid_id or related_name.endswith('+')):
            return [
                checks.Error(
                    "The name '%s' is invalid related_name for field %s.%s" %
                    (self.remote_field.related_name, self.model._meta.object_name,
                    self.name),
                    hint="Related name must be a valid Python identifier or end with a '+'",
                    obj=self,
                    id='fields.E306',
                )
            ]
        return []
    def _check_relation_model_exists(self):
        # fields.E300: the related model must be installed and concrete.
        rel_is_missing = self.remote_field.model not in self.opts.apps.get_models()
        rel_is_string = isinstance(self.remote_field.model, six.string_types)
        model_name = self.remote_field.model if rel_is_string else self.remote_field.model._meta.object_name
        if rel_is_missing and (rel_is_string or not self.remote_field.model._meta.swapped):
            return [
                checks.Error(
                    ("Field defines a relation with model '%s', which "
                     "is either not installed, or is abstract.") % model_name,
                    hint=None,
                    obj=self,
                    id='fields.E300',
                )
            ]
        return []
    def _check_referencing_to_swapped_model(self):
        # fields.E301: relations must point at the swapped-in replacement,
        # not the model that was swapped out.
        if (self.remote_field.model not in self.opts.apps.get_models() and
                not isinstance(self.remote_field.model, six.string_types) and
                self.remote_field.model._meta.swapped):
            model = "%s.%s" % (
                self.remote_field.model._meta.app_label,
                self.remote_field.model._meta.object_name
            )
            return [
                checks.Error(
                    ("Field defines a relation with the model '%s', "
                     "which has been swapped out.") % model,
                    hint="Update the relation to point at 'settings.%s'." % self.remote_field.model._meta.swappable,
                    obj=self,
                    id='fields.E301',
                )
            ]
        return []
    def _check_clashes(self):
        """
        Check accessor and reverse query name clashes.
        """
        from django.db.models.base import ModelBase
        errors = []
        opts = self.model._meta
        # `f.remote_field.model` may be a string instead of a model. Skip if model name is
        # not resolved.
        if not isinstance(self.remote_field.model, ModelBase):
            return []
        # If the field doesn't install backward relation on the target model (so
        # `is_hidden` returns True), then there are no clashes to check and we
        # can skip these fields.
        if self.remote_field.is_hidden():
            return []
        # Consider that we are checking field `Model.foreign` and the models
        # are:
        #
        #     class Target(models.Model):
        #         model = models.IntegerField()
        #         model_set = models.IntegerField()
        #
        #     class Model(models.Model):
        #         foreign = models.ForeignKey(Target)
        #         m2m = models.ManyToManyField(Target)
        rel_opts = self.remote_field.model._meta
        # rel_opts.object_name == "Target"
        rel_name = self.remote_field.get_accessor_name()  # i. e. "model_set"
        rel_query_name = self.related_query_name()  # i. e. "model"
        field_name = "%s.%s" % (opts.object_name,
            self.name)  # i. e. "Model.field"
        # Check clashes between accessor or reverse query name of `field`
        # and any other field name -- i.e. accessor for Model.foreign is
        # model_set and it clashes with Target.model_set.
        potential_clashes = rel_opts.fields + rel_opts.many_to_many
        for clash_field in potential_clashes:
            clash_name = "%s.%s" % (rel_opts.object_name,
                clash_field.name)  # i. e. "Target.model_set"
            if clash_field.name == rel_name:
                errors.append(
                    checks.Error(
                        "Reverse accessor for '%s' clashes with field name '%s'." % (field_name, clash_name),
                        hint=("Rename field '%s', or add/change a related_name "
                              "argument to the definition for field '%s'.") % (clash_name, field_name),
                        obj=self,
                        id='fields.E302',
                    )
                )
            if clash_field.name == rel_query_name:
                errors.append(
                    checks.Error(
                        "Reverse query name for '%s' clashes with field name '%s'." % (field_name, clash_name),
                        hint=("Rename field '%s', or add/change a related_name "
                              "argument to the definition for field '%s'.") % (clash_name, field_name),
                        obj=self,
                        id='fields.E303',
                    )
                )
        # Check clashes between accessors/reverse query names of `field` and
        # any other field accessor -- i. e. Model.foreign accessor clashes with
        # Model.m2m accessor.
        potential_clashes = (r for r in rel_opts.related_objects if r.field is not self)
        for clash_field in potential_clashes:
            clash_name = "%s.%s" % (  # i. e. "Model.m2m"
                clash_field.related_model._meta.object_name,
                clash_field.field.name)
            if clash_field.get_accessor_name() == rel_name:
                errors.append(
                    checks.Error(
                        "Reverse accessor for '%s' clashes with reverse accessor for '%s'." % (field_name, clash_name),
                        hint=("Add or change a related_name argument "
                              "to the definition for '%s' or '%s'.") % (field_name, clash_name),
                        obj=self,
                        id='fields.E304',
                    )
                )
            if clash_field.get_accessor_name() == rel_query_name:
                errors.append(
                    checks.Error(
                        "Reverse query name for '%s' clashes with reverse query name for '%s'."
                        % (field_name, clash_name),
                        hint=("Add or change a related_name argument "
                              "to the definition for '%s' or '%s'.") % (field_name, clash_name),
                        obj=self,
                        id='fields.E305',
                    )
                )
        return errors
    def db_type(self, connection):
        # By default related field will not have a column as it relates to
        # columns from another table.
        return None
    def contribute_to_class(self, cls, name, virtual_only=False):
        # Install the field on the model class, then schedule the related
        # model to be resolved once it is loaded.
        super(RelatedField, self).contribute_to_class(cls, name, virtual_only=virtual_only)
        self.opts = cls._meta
        if not cls._meta.abstract:
            if self.remote_field.related_name:
                # Allow '%(class)s'/'%(app_label)s' placeholders in
                # related_name (used by abstract base classes).
                related_name = force_text(self.remote_field.related_name) % {
                    'class': cls.__name__.lower(),
                    'app_label': cls._meta.app_label.lower()
                }
                self.remote_field.related_name = related_name
            def resolve_related_class(model, related, field):
                field.remote_field.model = related
                field.do_related_class(related, model)
            lazy_related_operation(resolve_related_class, cls, self.remote_field.model, field=self)
    def get_forward_related_filter(self, obj):
        """
        Return the keyword arguments that when supplied to
        self.model.object.filter(), would select all instances related through
        this field to the remote obj. This is used to build the querysets
        returned by related descriptors. obj is an instance of
        self.related_field.model.
        """
        return {
            '%s__%s' % (self.name, rh_field.name): getattr(obj, rh_field.attname)
            for _, rh_field in self.related_fields
        }
    def get_reverse_related_filter(self, obj):
        """
        Complement to get_forward_related_filter(). Return the keyword
        arguments that when passed to self.related_field.model.object.filter()
        select all instances of self.related_field.model related through
        this field to obj. obj is an instance of self.model.
        """
        base_filter = {
            rh_field.attname: getattr(obj, lh_field.attname)
            for lh_field, rh_field in self.related_fields
        }
        base_filter.update(self.get_extra_descriptor_filter(obj) or {})
        return base_filter
    @property
    def swappable_setting(self):
        """
        Get the setting that this is powered from for swapping, or None
        if it's not swapped in / marked with swappable=False.
        """
        if self.swappable:
            # Work out string form of "to"
            if isinstance(self.remote_field.model, six.string_types):
                to_string = self.remote_field.model
            else:
                to_string = self.remote_field.model._meta.label
            return apps.get_swappable_settings_name(to_string)
        return None
    def set_attributes_from_rel(self):
        # Derive name/verbose_name from the related model when not given.
        self.name = (
            self.name or
            (self.remote_field.model._meta.model_name + '_' + self.remote_field.model._meta.pk.name)
        )
        if self.verbose_name is None:
            self.verbose_name = self.remote_field.model._meta.verbose_name
        self.remote_field.set_field_name()
    @property
    def related(self):
        # Deprecated alias for remote_field.
        warnings.warn(
            "Usage of field.related has been deprecated. Use field.remote_field instead.",
            RemovedInDjango110Warning, 2)
        return self.remote_field
    def do_related_class(self, other, cls):
        # Called once the related model class is available (see
        # contribute_to_class); finalize the relation on both ends.
        self.set_attributes_from_rel()
        self.contribute_to_related_class(other, self.remote_field)
    def get_limit_choices_to(self):
        """
        Return ``limit_choices_to`` for this model field.
        If it is a callable, it will be invoked and the result will be
        returned.
        """
        if callable(self.remote_field.limit_choices_to):
            return self.remote_field.limit_choices_to()
        return self.remote_field.limit_choices_to
    def formfield(self, **kwargs):
        """
        Pass ``limit_choices_to`` to the field being constructed.
        Only passes it if there is a type that supports related fields.
        This is a similar strategy used to pass the ``queryset`` to the field
        being constructed.
        """
        defaults = {}
        if hasattr(self.remote_field, 'get_related_field'):
            # If this is a callable, do not invoke it here. Just pass
            # it in the defaults for when the form class will later be
            # instantiated.
            limit_choices_to = self.remote_field.limit_choices_to
            defaults.update({
                'limit_choices_to': limit_choices_to,
            })
        defaults.update(kwargs)
        return super(RelatedField, self).formfield(**defaults)
    def related_query_name(self):
        """
        Define the name that can be used to identify this related object in a
        table-spanning query.
        """
        return self.remote_field.related_query_name or self.remote_field.related_name or self.opts.model_name
    @property
    def target_field(self):
        """
        When filtering against this relation, returns the field on the remote
        model against which the filtering should happen.
        """
        target_fields = self.get_path_info()[-1].target_fields
        if len(target_fields) > 1:
            raise exceptions.FieldError(
                "The relation has multiple target fields, but only single target field was asked for")
        return target_fields[0]
class ForeignObject(RelatedField):
"""
Abstraction of the ForeignKey relation, supports multi-column relations.
"""
# Field flags
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
requires_unique_target = True
related_accessor_class = ReverseManyToOneDescriptor
rel_class = ForeignObjectRel
def __init__(self, to, on_delete, from_fields, to_fields, rel=None, related_name=None,
related_query_name=None, limit_choices_to=None, parent_link=False,
swappable=True, **kwargs):
if rel is None:
rel = self.rel_class(
self, to,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
parent_link=parent_link,
on_delete=on_delete,
)
super(ForeignObject, self).__init__(rel=rel, **kwargs)
self.from_fields = from_fields
self.to_fields = to_fields
self.swappable = swappable
def check(self, **kwargs):
errors = super(ForeignObject, self).check(**kwargs)
errors.extend(self._check_unique_target())
return errors
def _check_unique_target(self):
rel_is_string = isinstance(self.remote_field.model, six.string_types)
if rel_is_string or not self.requires_unique_target:
return []
try:
self.foreign_related_fields
except exceptions.FieldDoesNotExist:
return []
if not self.foreign_related_fields:
return []
unique_foreign_fields = {
frozenset([f.name])
for f in self.remote_field.model._meta.get_fields()
if getattr(f, 'unique', False)
}
unique_foreign_fields.update({
frozenset(ut)
for ut in self.remote_field.model._meta.unique_together
})
foreign_fields = {f.name for f in self.foreign_related_fields}
has_unique_constraint = any(u <= foreign_fields for u in unique_foreign_fields)
if not has_unique_constraint and len(self.foreign_related_fields) > 1:
field_combination = ', '.join("'%s'" % rel_field.name
for rel_field in self.foreign_related_fields)
model_name = self.remote_field.model.__name__
return [
checks.Error(
"No subset of the fields %s on model '%s' is unique."
% (field_combination, model_name),
hint=(
"Add unique=True on any of those fields or add at "
"least a subset of them to a unique_together constraint."
),
obj=self,
id='fields.E310',
)
]
elif not has_unique_constraint:
field_name = self.foreign_related_fields[0].name
model_name = self.remote_field.model.__name__
return [
checks.Error(
("'%s.%s' must set unique=True "
"because it is referenced by a foreign key.") % (model_name, field_name),
hint=None,
obj=self,
id='fields.E311',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(ForeignObject, self).deconstruct()
kwargs['on_delete'] = self.remote_field.on_delete
kwargs['from_fields'] = self.from_fields
kwargs['to_fields'] = self.to_fields
if self.remote_field.related_name is not None:
kwargs['related_name'] = self.remote_field.related_name
if self.remote_field.related_query_name is not None:
kwargs['related_query_name'] = self.remote_field.related_query_name
if self.remote_field.parent_link:
kwargs['parent_link'] = self.remote_field.parent_link
# Work out string form of "to"
if isinstance(self.remote_field.model, six.string_types):
kwargs['to'] = self.remote_field.model
else:
kwargs['to'] = "%s.%s" % (
self.remote_field.model._meta.app_label,
self.remote_field.model._meta.object_name,
)
# If swappable is True, then see if we're actually pointing to the target
# of a swap.
swappable_setting = self.swappable_setting
if swappable_setting is not None:
# If it's already a settings reference, error
if hasattr(kwargs['to'], "setting_name"):
if kwargs['to'].setting_name != swappable_setting:
raise ValueError(
"Cannot deconstruct a ForeignKey pointing to a model "
"that is swapped in place of more than one model (%s and %s)"
% (kwargs['to'].setting_name, swappable_setting)
)
# Set it
from django.db.migrations.writer import SettingsReference
kwargs['to'] = SettingsReference(
kwargs['to'],
swappable_setting,
)
return name, path, args, kwargs
def resolve_related_fields(self):
if len(self.from_fields) < 1 or len(self.from_fields) != len(self.to_fields):
raise ValueError('Foreign Object from and to fields must be the same non-zero length')
if isinstance(self.remote_field.model, six.string_types):
raise ValueError('Related model %r cannot be resolved' % self.remote_field.model)
related_fields = []
for index in range(len(self.from_fields)):
from_field_name = self.from_fields[index]
to_field_name = self.to_fields[index]
from_field = (self if from_field_name == 'self'
else self.opts.get_field(from_field_name))
to_field = (self.remote_field.model._meta.pk if to_field_name is None
else self.remote_field.model._meta.get_field(to_field_name))
related_fields.append((from_field, to_field))
return related_fields
@property
def related_fields(self):
if not hasattr(self, '_related_fields'):
self._related_fields = self.resolve_related_fields()
return self._related_fields
@property
def reverse_related_fields(self):
return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields]
@property
def local_related_fields(self):
return tuple(lhs_field for lhs_field, rhs_field in self.related_fields)
@property
def foreign_related_fields(self):
return tuple(rhs_field for lhs_field, rhs_field in self.related_fields if rhs_field)
    def get_local_related_value(self, instance):
        # Values held by this relation's local fields on the given instance.
        return self.get_instance_value_for_fields(instance, self.local_related_fields)
    def get_foreign_related_value(self, instance):
        # Values held by the relation's target fields on the related instance.
        return self.get_instance_value_for_fields(instance, self.foreign_related_fields)
    @staticmethod
    def get_instance_value_for_fields(instance, fields):
        """
        Return a tuple with the current value of each field in ``fields``
        read from ``instance``, substituting ``instance.pk`` for primary-key
        fields that are not inherited via a concrete parent link.
        """
        ret = []
        opts = instance._meta
        for field in fields:
            # Gotcha: in some cases (like fixture loading) a model can have
            # different values in parent_ptr_id and parent's id. So, use
            # instance.pk (that is, parent_ptr_id) when asked for instance.id.
            if field.primary_key:
                possible_parent_link = opts.get_ancestor_link(field.model)
                if (not possible_parent_link or
                        possible_parent_link.primary_key or
                        possible_parent_link.model._meta.abstract):
                    ret.append(instance.pk)
                    continue
            ret.append(getattr(instance, field.attname))
        return tuple(ret)
    def get_attname_column(self):
        # Column is None: presumably because a ForeignObject has no concrete
        # column of its own (its component fields do) — TODO confirm against
        # the enclosing class docs.
        attname, column = super(ForeignObject, self).get_attname_column()
        return attname, None
def get_joining_columns(self, reverse_join=False):
source = self.reverse_related_fields if reverse_join else self.related_fields
return tuple((lhs_field.column, rhs_field.column) for lhs_field, rhs_field in source)
    def get_reverse_joining_columns(self):
        # Joining columns as seen from the related model's side.
        return self.get_joining_columns(reverse_join=True)
    def get_extra_descriptor_filter(self, instance):
        """
        Return an extra filter condition for related object fetching when
        user does 'instance.fieldname', that is the extra filter is used in
        the descriptor of the field.
        The filter should be either a dict usable in .filter(**kwargs) call or
        a Q-object. The condition will be ANDed together with the relation's
        joining columns.
        A parallel method is get_extra_restriction() which is used in
        JOIN and subquery conditions.
        """
        # No extra filtering by default; subclasses may override.
        return {}
    def get_extra_restriction(self, where_class, alias, related_alias):
        """
        Return a pair condition used for joining and subquery pushdown. The
        condition is something that responds to as_sql(compiler, connection)
        method.
        Note that currently referring both the 'alias' and 'related_alias'
        will not work in some conditions, like subquery pushdown.
        A parallel method is get_extra_descriptor_filter() which is used in
        instance.fieldname related object fetching.
        """
        # No extra restriction by default; subclasses may override.
        return None
    def get_path_info(self):
        """
        Get path from this field to the related model.
        """
        opts = self.remote_field.model._meta
        from_opts = self.model._meta
        # Single direct hop; not m2m, direct relation (last two flags).
        return [PathInfo(from_opts, opts, self.foreign_related_fields, self, False, True)]
    def get_reverse_path_info(self):
        """
        Get path from the related model to this field's model.
        """
        opts = self.model._meta
        from_opts = self.remote_field.model._meta
        # Reverse hop joins on this model's pk; it is multi-valued unless
        # the field is unique.
        pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.remote_field, not self.unique, False)]
        return pathinfos
def get_lookup(self, lookup_name):
if lookup_name == 'in':
return RelatedIn
elif lookup_name == 'exact':
return RelatedExact
elif lookup_name == 'gt':
return RelatedGreaterThan
elif lookup_name == 'gte':
return RelatedGreaterThanOrEqual
elif lookup_name == 'lt':
return RelatedLessThan
elif lookup_name == 'lte':
return RelatedLessThanOrEqual
elif lookup_name != 'isnull':
raise TypeError('Related Field got invalid lookup: %s' % lookup_name)
return super(ForeignObject, self).get_lookup(lookup_name)
    def get_transform(self, *args, **kwargs):
        # Transforms make no sense on a relation; always refuse.
        raise NotImplementedError('Relational fields do not support transforms.')
    @property
    def attnames(self):
        # Attribute names of the local fields making up this relation.
        return tuple(field.attname for field in self.local_related_fields)
    def get_defaults(self):
        # Default value of each local field, in related_fields order.
        return tuple(field.get_default() for field in self.local_related_fields)
    def contribute_to_class(self, cls, name, virtual_only=False):
        """Register the field on ``cls`` and install the forward descriptor."""
        super(ForeignObject, self).contribute_to_class(cls, name, virtual_only=virtual_only)
        # Accessing instance.<name> goes through the descriptor, which
        # fetches/caches the related object.
        setattr(cls, self.name, ForwardManyToOneDescriptor(self))
    def contribute_to_related_class(self, cls, related):
        """Install the reverse accessor on the related model class."""
        # Internal FK's - i.e., those with a related name ending with '+' -
        # and swapped models don't get a related descriptor.
        if not self.remote_field.is_hidden() and not related.related_model._meta.swapped:
            setattr(cls, related.get_accessor_name(), self.related_accessor_class(related))
            # While 'limit_choices_to' might be a callable, simply pass
            # it along for later - this is too early because it's still
            # model load time.
            if self.remote_field.limit_choices_to:
                cls._meta.related_fkey_lookups.append(self.remote_field.limit_choices_to)
class ForeignKey(ForeignObject):
    """
    Provide a many-to-one relation by adding a column to the local model
    to hold the remote value.
    By default ForeignKey will target the pk of the remote model but this
    behavior can be changed by using the ``to_field`` argument.
    """
    # Field flags
    many_to_many = False
    many_to_one = True
    one_to_many = False
    one_to_one = False
    rel_class = ManyToOneRel
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _('%(model)s instance with %(field)s %(value)r does not exist.')
    }
    description = _("Foreign Key (type determined by related field)")
    def __init__(self, to, on_delete=None, related_name=None, related_query_name=None,
                 limit_choices_to=None, parent_link=False, to_field=None,
                 db_constraint=True, **kwargs):
        """
        ``to`` may be a model class, a model name string, or 'self'.
        ``on_delete`` is optional here only for backwards compatibility; a
        deprecation warning is raised when it is omitted or non-callable.
        """
        try:
            to._meta.model_name
        except AttributeError:
            # Not a model class: must then be a string reference.
            assert isinstance(to, six.string_types), (
                "%s(%r) is invalid. First parameter to ForeignKey must be "
                "either a model, a model name, or the string %r" % (
                    self.__class__.__name__, to,
                    RECURSIVE_RELATIONSHIP_CONSTANT,
                )
            )
        else:
            # For backwards compatibility purposes, we need to *try* and set
            # the to_field during FK construction. It won't be guaranteed to
            # be correct until contribute_to_class is called. Refs #12190.
            to_field = to_field or (to._meta.pk and to._meta.pk.name)
        if on_delete is None:
            # Deprecation shim: on_delete becomes required in Django 2.0.
            warnings.warn(
                "on_delete will be a required arg for %s in Django 2.0. "
                "Set it to models.CASCADE if you want to maintain the current default behavior. "
                "See https://docs.djangoproject.com/en/%s/ref/models/fields/"
                "#django.db.models.ForeignKey.on_delete" % (
                    self.__class__.__name__,
                    get_docs_version(),
                ),
                RemovedInDjango20Warning, 2)
            on_delete = CASCADE
        elif not callable(on_delete):
            # Old signature passed to_field as second positional arg, so a
            # non-callable on_delete is really a to_field value: swap them.
            warnings.warn(
                "The signature for {0} will change in Django 2.0. "
                "Pass to_field='{1}' as a kwarg instead of as an arg.".format(
                    self.__class__.__name__,
                    on_delete,
                ),
                RemovedInDjango20Warning, 2)
            on_delete, to_field = to_field, on_delete
        kwargs['rel'] = self.rel_class(
            self, to, to_field,
            related_name=related_name,
            related_query_name=related_query_name,
            limit_choices_to=limit_choices_to,
            parent_link=parent_link,
            on_delete=on_delete,
        )
        # FK columns are indexed by default (can be disabled explicitly).
        kwargs['db_index'] = kwargs.get('db_index', True)
        super(ForeignKey, self).__init__(
            to, on_delete, from_fields=['self'], to_fields=[to_field], **kwargs)
        self.db_constraint = db_constraint
    def check(self, **kwargs):
        """Run system checks, adding FK-specific on_delete/unique checks."""
        errors = super(ForeignKey, self).check(**kwargs)
        errors.extend(self._check_on_delete())
        errors.extend(self._check_unique())
        return errors
    def _check_on_delete(self):
        """Flag on_delete rules that conflict with null/default settings."""
        on_delete = getattr(self.remote_field, 'on_delete', None)
        if on_delete == SET_NULL and not self.null:
            return [
                checks.Error(
                    'Field specifies on_delete=SET_NULL, but cannot be null.',
                    hint='Set null=True argument on the field, or change the on_delete rule.',
                    obj=self,
                    id='fields.E320',
                )
            ]
        elif on_delete == SET_DEFAULT and not self.has_default():
            return [
                checks.Error(
                    'Field specifies on_delete=SET_DEFAULT, but has no default value.',
                    hint='Set a default value, or change the on_delete rule.',
                    obj=self,
                    id='fields.E321',
                )
            ]
        else:
            return []
    def _check_unique(self, **kwargs):
        """Warn that a unique FK is better expressed as a OneToOneField."""
        return [
            checks.Warning(
                'Setting unique=True on a ForeignKey has the same effect as using a OneToOneField.',
                hint='ForeignKey(unique=True) is usually better served by a OneToOneField.',
                obj=self,
                id='fields.W342',
            )
        ] if self.unique else []
    def deconstruct(self):
        """Serialize for migrations, reducing kwargs to the FK-level API."""
        name, path, args, kwargs = super(ForeignKey, self).deconstruct()
        # from_fields/to_fields are implementation details of ForeignObject.
        del kwargs['to_fields']
        del kwargs['from_fields']
        # Handle the simpler arguments
        if self.db_index:
            del kwargs['db_index']
        else:
            kwargs['db_index'] = False
        if self.db_constraint is not True:
            kwargs['db_constraint'] = self.db_constraint
        # Rel needs more work.
        to_meta = getattr(self.remote_field.model, "_meta", None)
        # Only emit to_field when it differs from the target's pk name.
        if self.remote_field.field_name and (
                not to_meta or (to_meta.pk and self.remote_field.field_name != to_meta.pk.name)):
            kwargs['to_field'] = self.remote_field.field_name
        return name, path, args, kwargs
    @property
    def target_field(self):
        # The single remote field this FK points at.
        return self.foreign_related_fields[0]
    def get_reverse_path_info(self):
        """
        Get path from the related model to this field's model.
        """
        opts = self.model._meta
        from_opts = self.remote_field.model._meta
        pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.remote_field, not self.unique, False)]
        return pathinfos
    def validate(self, value, model_instance):
        """Check that ``value`` identifies an existing related row."""
        if self.remote_field.parent_link:
            return
        super(ForeignKey, self).validate(value, model_instance)
        if value is None:
            return
        using = router.db_for_read(model_instance.__class__, instance=model_instance)
        qs = self.remote_field.model._default_manager.using(using).filter(
            **{self.remote_field.field_name: value}
        )
        qs = qs.complex_filter(self.get_limit_choices_to())
        if not qs.exists():
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={
                    'model': self.remote_field.model._meta.verbose_name, 'pk': value,
                    'field': self.remote_field.field_name, 'value': value,
                },  # 'pk' is included for backwards compatibility
            )
    def get_attname(self):
        # The raw value lives on <name>_id.
        return '%s_id' % self.name
    def get_attname_column(self):
        """Return the instance attribute name and its database column."""
        attname = self.get_attname()
        column = self.db_column or attname
        return attname, column
    def get_default(self):
        "Here we check if the default value is an object and return the to_field if so."
        field_default = super(ForeignKey, self).get_default()
        if isinstance(field_default, self.remote_field.model):
            return getattr(field_default, self.target_field.attname)
        return field_default
    def get_db_prep_save(self, value, connection):
        """Prepare ``value`` for saving, delegating to the target field."""
        # Empty string is treated as NULL when the target doesn't accept
        # empty strings (or the backend stores them as NULL).
        if value is None or (value == '' and
                             (not self.target_field.empty_strings_allowed or
                              connection.features.interprets_empty_strings_as_nulls)):
            return None
        else:
            return self.target_field.get_db_prep_save(value, connection=connection)
    def get_db_prep_value(self, value, connection, prepared=False):
        # Query-time preparation is the target field's job.
        return self.target_field.get_db_prep_value(value, connection, prepared)
    def value_to_string(self, obj):
        """Serialize this field's value on ``obj`` (e.g. for dumpdata)."""
        if not obj:
            # In required many-to-one fields with only one available choice,
            # select that one available choice. Note: For SelectFields
            # we have to check that the length of choices is *2*, not 1,
            # because SelectFields always have an initial "blank" value.
            if not self.blank and self.choices:
                choice_list = self.get_choices_default()
                if len(choice_list) == 2:
                    return smart_text(choice_list[1][0])
        return super(ForeignKey, self).value_to_string(obj)
    def contribute_to_related_class(self, cls, related):
        """Install reverse accessor; default field_name to the target's pk."""
        super(ForeignKey, self).contribute_to_related_class(cls, related)
        if self.remote_field.field_name is None:
            self.remote_field.field_name = cls._meta.pk.name
    def formfield(self, **kwargs):
        """Build a ModelChoiceField over the related model's default manager."""
        db = kwargs.pop('using', None)
        if isinstance(self.remote_field.model, six.string_types):
            raise ValueError("Cannot create form field for %r yet, because "
                             "its related model %r has not been loaded yet" %
                             (self.name, self.remote_field.model))
        defaults = {
            'form_class': forms.ModelChoiceField,
            'queryset': self.remote_field.model._default_manager.using(db),
            'to_field_name': self.remote_field.field_name,
        }
        defaults.update(kwargs)
        return super(ForeignKey, self).formfield(**defaults)
    def db_type(self, connection):
        # The database column type of a ForeignKey is the column type
        # of the field to which it points. An exception is if the ForeignKey
        # points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
        # in which case the column type is simply that of an IntegerField.
        # If the database needs similar types for key fields however, the only
        # thing we can do is making AutoField an IntegerField.
        rel_field = self.target_field
        if (isinstance(rel_field, AutoField) or
                (not connection.features.related_fields_match_type and
                isinstance(rel_field, (PositiveIntegerField,
                                       PositiveSmallIntegerField)))):
            return IntegerField().db_type(connection=connection)
        return rel_field.db_type(connection=connection)
    def db_parameters(self, connection):
        # No check constraints on FK columns.
        return {"type": self.db_type(connection), "check": []}
    def convert_empty_strings(self, value, expression, connection, context):
        # Backends that store '' as NULL need '' read back as None.
        if (not value) and isinstance(value, six.string_types):
            return None
        return value
    def get_db_converters(self, connection):
        """Add the empty-string converter on backends that need it."""
        converters = super(ForeignKey, self).get_db_converters(connection)
        if connection.features.interprets_empty_strings_as_nulls:
            converters += [self.convert_empty_strings]
        return converters
    def get_col(self, alias, output_field=None):
        # Default the output field to the target so comparisons use its type.
        return super(ForeignKey, self).get_col(alias, output_field or self.target_field)
class OneToOneField(ForeignKey):
    """
    A OneToOneField is essentially the same as a ForeignKey, with the exception
    that it always carries a "unique" constraint with it and the reverse
    relation always returns the object pointed to (since there will only ever
    be one), rather than returning a list.
    """
    # Field flags
    many_to_many = False
    many_to_one = False
    one_to_many = False
    one_to_one = True
    related_accessor_class = ReverseOneToOneDescriptor
    rel_class = OneToOneRel
    description = _("One-to-one relationship")
    def __init__(self, to, on_delete=None, to_field=None, **kwargs):
        """Force unique=True; mirror ForeignKey's on_delete deprecation shims."""
        kwargs['unique'] = True
        if on_delete is None:
            warnings.warn(
                "on_delete will be a required arg for %s in Django 2.0. "
                "Set it to models.CASCADE if you want to maintain the current default behavior. "
                "See https://docs.djangoproject.com/en/%s/ref/models/fields/"
                "#django.db.models.ForeignKey.on_delete" % (
                    self.__class__.__name__,
                    get_docs_version(),
                ),
                RemovedInDjango20Warning, 2)
            on_delete = CASCADE
        elif not callable(on_delete):
            # Old signature passed to_field positionally where on_delete now
            # sits; reinterpret accordingly.
            warnings.warn(
                "The signature for {0} will change in Django 2.0. "
                "Pass to_field='{1}' as a kwarg instead of as an arg.".format(
                    self.__class__.__name__,
                    on_delete,
                ),
                RemovedInDjango20Warning, 2)
            to_field = on_delete
            on_delete = CASCADE  # Avoid warning in superclass
        super(OneToOneField, self).__init__(to, on_delete, to_field=to_field, **kwargs)
    def deconstruct(self):
        """Drop 'unique' from kwargs: it is implied by the field class."""
        name, path, args, kwargs = super(OneToOneField, self).deconstruct()
        if "unique" in kwargs:
            del kwargs['unique']
        return name, path, args, kwargs
    def formfield(self, **kwargs):
        # Parent links are managed by inheritance, not by forms.
        if self.remote_field.parent_link:
            return None
        return super(OneToOneField, self).formfield(**kwargs)
    def save_form_data(self, instance, data):
        """Store form data: model instances via the descriptor, pks via attname."""
        if isinstance(data, self.remote_field.model):
            setattr(instance, self.name, data)
        else:
            setattr(instance, self.attname, data)
    def _check_unique(self, **kwargs):
        # Override ForeignKey since check isn't applicable here.
        return []
def create_many_to_many_intermediary_model(field, klass):
    """
    Build the automatic "through" model for a ManyToManyField declared on
    ``klass``: a model holding two ForeignKeys — one back to ``klass`` and
    one to the target model — with unique_together over the pair.
    """
    from django.db import models
    def set_managed(model, related, through):
        # The join table is managed whenever either endpoint model is.
        through._meta.managed = model._meta.managed or related._meta.managed
    to_model = resolve_relation(klass, field.remote_field.model)
    name = '%s_%s' % (klass._meta.object_name, field.name)
    # Deferred so it also works when to_model is a lazy string reference.
    lazy_related_operation(set_managed, klass, to_model, name)
    to = make_model_tuple(to_model)[1]
    from_ = klass._meta.model_name
    if to == from_:
        # Self-referential m2m: disambiguate the two FK attribute names.
        to = 'to_%s' % to
        from_ = 'from_%s' % from_
    meta = type(str('Meta'), (object,), {
        'db_table': field._get_m2m_db_table(klass._meta),
        'auto_created': klass,
        'app_label': klass._meta.app_label,
        'db_tablespace': klass._meta.db_tablespace,
        'unique_together': (from_, to),
        'verbose_name': '%(from)s-%(to)s relationship' % {'from': from_, 'to': to},
        'verbose_name_plural': '%(from)s-%(to)s relationships' % {'from': from_, 'to': to},
        'apps': field.model._meta.apps,
    })
    # Construct and return the new class.
    return type(str(name), (models.Model,), {
        'Meta': meta,
        '__module__': klass.__module__,
        from_: models.ForeignKey(
            klass,
            related_name='%s+' % name,
            db_tablespace=field.db_tablespace,
            db_constraint=field.remote_field.db_constraint,
            on_delete=CASCADE,
        ),
        to: models.ForeignKey(
            to_model,
            related_name='%s+' % name,
            db_tablespace=field.db_tablespace,
            db_constraint=field.remote_field.db_constraint,
            on_delete=CASCADE,
        )
    })
class ManyToManyField(RelatedField):
"""
Provide a many-to-many relation by using an intermediary model that
holds two ForeignKey fields pointed at the two sides of the relation.
Unless a ``through`` model was provided, ManyToManyField will use the
create_many_to_many_intermediary_model factory to automatically generate
the intermediary model.
"""
# Field flags
many_to_many = True
many_to_one = False
one_to_many = False
one_to_one = False
rel_class = ManyToManyRel
description = _("Many-to-many relationship")
def __init__(self, to, related_name=None, related_query_name=None,
limit_choices_to=None, symmetrical=None, through=None,
through_fields=None, db_constraint=True, db_table=None,
swappable=True, **kwargs):
try:
to._meta
except AttributeError:
assert isinstance(to, six.string_types), (
"%s(%r) is invalid. First parameter to ManyToManyField must be "
"either a model, a model name, or the string %r" %
(self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
)
# Class names must be ASCII in Python 2.x, so we forcibly coerce it
# here to break early if there's a problem.
to = str(to)
if symmetrical is None:
symmetrical = (to == RECURSIVE_RELATIONSHIP_CONSTANT)
if through is not None:
assert db_table is None, (
"Cannot specify a db_table if an intermediary model is used."
)
kwargs['rel'] = self.rel_class(
self, to,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
symmetrical=symmetrical,
through=through,
through_fields=through_fields,
db_constraint=db_constraint,
)
self.has_null_arg = 'null' in kwargs
super(ManyToManyField, self).__init__(**kwargs)
self.db_table = db_table
self.swappable = swappable
def check(self, **kwargs):
errors = super(ManyToManyField, self).check(**kwargs)
errors.extend(self._check_unique(**kwargs))
errors.extend(self._check_relationship_model(**kwargs))
errors.extend(self._check_ignored_options(**kwargs))
return errors
def _check_unique(self, **kwargs):
if self.unique:
return [
checks.Error(
'ManyToManyFields cannot be unique.',
hint=None,
obj=self,
id='fields.E330',
)
]
return []
def _check_ignored_options(self, **kwargs):
warnings = []
if self.has_null_arg:
warnings.append(
checks.Warning(
'null has no effect on ManyToManyField.',
hint=None,
obj=self,
id='fields.W340',
)
)
if len(self._validators) > 0:
warnings.append(
checks.Warning(
'ManyToManyField does not support validators.',
hint=None,
obj=self,
id='fields.W341',
)
)
return warnings
def _check_relationship_model(self, from_model=None, **kwargs):
if hasattr(self.remote_field.through, '_meta'):
qualified_model_name = "%s.%s" % (
self.remote_field.through._meta.app_label, self.remote_field.through.__name__)
else:
qualified_model_name = self.remote_field.through
errors = []
if self.remote_field.through not in self.opts.apps.get_models(include_auto_created=True):
# The relationship model is not installed.
errors.append(
checks.Error(
("Field specifies a many-to-many relation through model "
"'%s', which has not been installed.") %
qualified_model_name,
hint=None,
obj=self,
id='fields.E331',
)
)
else:
assert from_model is not None, (
"ManyToManyField with intermediate "
"tables cannot be checked if you don't pass the model "
"where the field is attached to."
)
# Set some useful local variables
to_model = resolve_relation(from_model, self.remote_field.model)
from_model_name = from_model._meta.object_name
if isinstance(to_model, six.string_types):
to_model_name = to_model
else:
to_model_name = to_model._meta.object_name
relationship_model_name = self.remote_field.through._meta.object_name
self_referential = from_model == to_model
# Check symmetrical attribute.
if (self_referential and self.remote_field.symmetrical and
not self.remote_field.through._meta.auto_created):
errors.append(
checks.Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
hint=None,
obj=self,
id='fields.E332',
)
)
# Count foreign keys in intermediate model
if self_referential:
seen_self = sum(from_model == getattr(field.remote_field, 'model', None)
for field in self.remote_field.through._meta.fields)
if seen_self > 2 and not self.remote_field.through_fields:
errors.append(
checks.Error(
("The model is used as an intermediate model by "
"'%s', but it has more than two foreign keys "
"to '%s', which is ambiguous. You must specify "
"which two foreign keys Django should use via the "
"through_fields keyword argument.") % (self, from_model_name),
hint=("Use through_fields to specify which two "
"foreign keys Django should use."),
obj=self.remote_field.through,
id='fields.E333',
)
)
else:
# Count foreign keys in relationship model
seen_from = sum(from_model == getattr(field.remote_field, 'model', None)
for field in self.remote_field.through._meta.fields)
seen_to = sum(to_model == getattr(field.remote_field, 'model', None)
for field in self.remote_field.through._meta.fields)
if seen_from > 1 and not self.remote_field.through_fields:
errors.append(
checks.Error(
("The model is used as an intermediate model by "
"'%s', but it has more than one foreign key "
"from '%s', which is ambiguous. You must specify "
"which foreign key Django should use via the "
"through_fields keyword argument.") % (self, from_model_name),
hint=('If you want to create a recursive relationship, '
'use ForeignKey("self", symmetrical=False, '
'through="%s").') % relationship_model_name,
obj=self,
id='fields.E334',
)
)
if seen_to > 1 and not self.remote_field.through_fields:
errors.append(
checks.Error(
("The model is used as an intermediate model by "
"'%s', but it has more than one foreign key "
"to '%s', which is ambiguous. You must specify "
"which foreign key Django should use via the "
"through_fields keyword argument.") % (self, to_model_name),
hint=('If you want to create a recursive '
'relationship, use ForeignKey("self", '
'symmetrical=False, through="%s").') % relationship_model_name,
obj=self,
id='fields.E335',
)
)
if seen_from == 0 or seen_to == 0:
errors.append(
checks.Error(
("The model is used as an intermediate model by "
"'%s', but it does not have a foreign key to '%s' or '%s'.") % (
self, from_model_name, to_model_name
),
hint=None,
obj=self.remote_field.through,
id='fields.E336',
)
)
# Validate `through_fields`.
if self.remote_field.through_fields is not None:
# Validate that we're given an iterable of at least two items
# and that none of them is "falsy".
if not (len(self.remote_field.through_fields) >= 2 and
self.remote_field.through_fields[0] and self.remote_field.through_fields[1]):
errors.append(
checks.Error(
("Field specifies 'through_fields' but does not "
"provide the names of the two link fields that should be "
"used for the relation through model "
"'%s'.") % qualified_model_name,
hint=("Make sure you specify 'through_fields' as "
"through_fields=('field1', 'field2')"),
obj=self,
id='fields.E337',
)
)
# Validate the given through fields -- they should be actual
# fields on the through model, and also be foreign keys to the
# expected models.
else:
assert from_model is not None, (
"ManyToManyField with intermediate "
"tables cannot be checked if you don't pass the model "
"where the field is attached to."
)
source, through, target = from_model, self.remote_field.through, self.remote_field.model
source_field_name, target_field_name = self.remote_field.through_fields[:2]
for field_name, related_model in ((source_field_name, source),
(target_field_name, target)):
possible_field_names = []
for f in through._meta.fields:
if hasattr(f, 'remote_field') and getattr(f.remote_field, 'model', None) == related_model:
possible_field_names.append(f.name)
if possible_field_names:
hint = ("Did you mean one of the following foreign "
"keys to '%s': %s?") % (related_model._meta.object_name,
', '.join(possible_field_names))
else:
hint = None
try:
field = through._meta.get_field(field_name)
except exceptions.FieldDoesNotExist:
errors.append(
checks.Error(
("The intermediary model '%s' has no field '%s'.") % (
qualified_model_name, field_name),
hint=hint,
obj=self,
id='fields.E338',
)
)
else:
if not (hasattr(field, 'remote_field') and
getattr(field.remote_field, 'model', None) == related_model):
errors.append(
checks.Error(
"'%s.%s' is not a foreign key to '%s'." % (
through._meta.object_name, field_name,
related_model._meta.object_name),
hint=hint,
obj=self,
id='fields.E339',
)
)
return errors
def deconstruct(self):
name, path, args, kwargs = super(ManyToManyField, self).deconstruct()
# Handle the simpler arguments.
if self.db_table is not None:
kwargs['db_table'] = self.db_table
if self.remote_field.db_constraint is not True:
kwargs['db_constraint'] = self.remote_field.db_constraint
if self.remote_field.related_name is not None:
kwargs['related_name'] = self.remote_field.related_name
if self.remote_field.related_query_name is not None:
kwargs['related_query_name'] = self.remote_field.related_query_name
# Rel needs more work.
if isinstance(self.remote_field.model, six.string_types):
kwargs['to'] = self.remote_field.model
else:
kwargs['to'] = "%s.%s" % (
self.remote_field.model._meta.app_label,
self.remote_field.model._meta.object_name,
)
if getattr(self.remote_field, 'through', None) is not None:
if isinstance(self.remote_field.through, six.string_types):
kwargs['through'] = self.remote_field.through
elif not self.remote_field.through._meta.auto_created:
kwargs['through'] = "%s.%s" % (
self.remote_field.through._meta.app_label,
self.remote_field.through._meta.object_name,
)
# If swappable is True, then see if we're actually pointing to the target
# of a swap.
swappable_setting = self.swappable_setting
if swappable_setting is not None:
# If it's already a settings reference, error.
if hasattr(kwargs['to'], "setting_name"):
if kwargs['to'].setting_name != swappable_setting:
raise ValueError(
"Cannot deconstruct a ManyToManyField pointing to a "
"model that is swapped in place of more than one model "
"(%s and %s)" % (kwargs['to'].setting_name, swappable_setting)
)
from django.db.migrations.writer import SettingsReference
kwargs['to'] = SettingsReference(
kwargs['to'],
swappable_setting,
)
return name, path, args, kwargs
def _get_path_info(self, direct=False):
"""
Called by both direct and indirect m2m traversal.
"""
pathinfos = []
int_model = self.remote_field.through
linkfield1 = int_model._meta.get_field(self.m2m_field_name())
linkfield2 = int_model._meta.get_field(self.m2m_reverse_field_name())
if direct:
join1infos = linkfield1.get_reverse_path_info()
join2infos = linkfield2.get_path_info()
else:
join1infos = linkfield2.get_reverse_path_info()
join2infos = linkfield1.get_path_info()
pathinfos.extend(join1infos)
pathinfos.extend(join2infos)
return pathinfos
def get_path_info(self):
return self._get_path_info(direct=True)
def get_reverse_path_info(self):
return self._get_path_info(direct=False)
def get_choices_default(self):
return Field.get_choices(self, include_blank=False)
def _get_m2m_db_table(self, opts):
"""
Function that can be curried to provide the m2m table name for this
relation.
"""
if self.remote_field.through is not None:
return self.remote_field.through._meta.db_table
elif self.db_table:
return self.db_table
else:
return utils.truncate_name('%s_%s' % (opts.db_table, self.name),
connection.ops.max_name_length())
def _get_m2m_attr(self, related, attr):
"""
Function that can be curried to provide the source accessor or DB
column name for the m2m table.
"""
cache_attr = '_m2m_%s_cache' % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
if self.remote_field.through_fields is not None:
link_field_name = self.remote_field.through_fields[0]
else:
link_field_name = None
for f in self.remote_field.through._meta.fields:
if (f.is_relation and f.remote_field.model == related.related_model and
(link_field_name is None or link_field_name == f.name)):
setattr(self, cache_attr, getattr(f, attr))
return getattr(self, cache_attr)
def _get_m2m_reverse_attr(self, related, attr):
"""
Function that can be curried to provide the related accessor or DB
column name for the m2m table.
"""
cache_attr = '_m2m_reverse_%s_cache' % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
found = False
if self.remote_field.through_fields is not None:
link_field_name = self.remote_field.through_fields[1]
else:
link_field_name = None
for f in self.remote_field.through._meta.fields:
if f.is_relation and f.remote_field.model == related.model:
if link_field_name is None and related.related_model == related.model:
# If this is an m2m-intermediate to self,
# the first foreign key you find will be
# the source column. Keep searching for
# the second foreign key.
if found:
setattr(self, cache_attr, getattr(f, attr))
break
else:
found = True
elif link_field_name is None or link_field_name == f.name:
setattr(self, cache_attr, getattr(f, attr))
break
return getattr(self, cache_attr)
def value_to_string(self, obj):
data = ''
if obj:
qs = getattr(obj, self.name).all()
data = [instance._get_pk_val() for instance in qs]
else:
# In required many-to-many fields with only one available choice,
# select that one available choice.
if not self.blank:
choices_list = self.get_choices_default()
if len(choices_list) == 1:
data = [choices_list[0][0]]
return smart_text(data)
def contribute_to_class(self, cls, name, **kwargs):
# To support multiple relations to self, it's useful to have a non-None
# related name on symmetrical relations for internal reasons. The
# concept doesn't make a lot of sense externally ("you want me to
# specify *what* on my non-reversible relation?!"), so we set it up
# automatically. The funky name reduces the chance of an accidental
# clash.
if self.remote_field.symmetrical and (
self.remote_field.model == "self" or self.remote_field.model == cls._meta.object_name):
self.remote_field.related_name = "%s_rel_+" % name
elif self.remote_field.is_hidden():
# If the backwards relation is disabled, replace the original
# related_name with one generated from the m2m field name. Django
# still uses backwards relations internally and we need to avoid
# clashes between multiple m2m fields with related_name == '+'.
self.remote_field.related_name = "_%s_%s_+" % (cls.__name__.lower(), name)
super(ManyToManyField, self).contribute_to_class(cls, name, **kwargs)
# The intermediate m2m model is not auto created if:
# 1) There is a manually specified intermediate, or
# 2) The class owning the m2m field is abstract.
# 3) The class owning the m2m field has been swapped out.
if not cls._meta.abstract:
if self.remote_field.through:
def resolve_through_model(_, model, field):
field.remote_field.through = model
lazy_related_operation(resolve_through_model, cls, self.remote_field.through, field=self)
elif not cls._meta.swapped:
self.remote_field.through = create_many_to_many_intermediary_model(self, cls)
# Add the descriptor for the m2m relation.
setattr(cls, self.name, ManyToManyDescriptor(self.remote_field, reverse=False))
# Set up the accessor for the m2m table name for the relation.
self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
def contribute_to_related_class(self, cls, related):
    """Hook run on the related model class ``cls``.

    Installs the reverse descriptor (unless hidden/swapped) and lazy
    accessors for the m2m join-table column and field names.
    """
    # Internal M2Ms (i.e., those with a related name ending with '+')
    # and swapped models don't get a related descriptor.
    if not self.remote_field.is_hidden() and not related.related_model._meta.swapped:
        setattr(cls, related.get_accessor_name(), ManyToManyDescriptor(self.remote_field, reverse=True))

    # Set up the accessors for the column names on the m2m table.
    self.m2m_column_name = curry(self._get_m2m_attr, related, 'column')
    self.m2m_reverse_name = curry(self._get_m2m_reverse_attr, related, 'column')

    self.m2m_field_name = curry(self._get_m2m_attr, related, 'name')
    self.m2m_reverse_field_name = curry(self._get_m2m_reverse_attr, related, 'name')

    # Target field names are resolved lazily via the remote_field accessors,
    # since the related field may not be fully set up yet at this point.
    get_m2m_rel = curry(self._get_m2m_attr, related, 'remote_field')
    self.m2m_target_field_name = lambda: get_m2m_rel().field_name
    get_m2m_reverse_rel = curry(self._get_m2m_reverse_attr, related, 'remote_field')
    self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name
def set_attributes_from_rel(self):
    # Intentional no-op override: unlike a ForeignKey, a ManyToManyField
    # does not copy name/verbose_name attributes from the target field.
    pass
def value_from_object(self, obj):
    """
    Return the value of this field in the given model instance.
    """
    # For an m2m field this is the full set of related objects, obtained
    # from the related manager (evaluated lazily as a queryset).
    return getattr(obj, self.attname).all()
def save_form_data(self, instance, data):
    # Assigning to the attribute hands `data` to the descriptor installed by
    # contribute_to_class; presumably it replaces the current related set —
    # confirm against the ManyToManyDescriptor implementation.
    setattr(instance, self.attname, data)
def formfield(self, **kwargs):
    """Return the default form field (a ModelMultipleChoiceField) for
    this many-to-many field, honoring an optional 'using' database alias."""
    using = kwargs.pop('using', None)
    options = {
        'form_class': forms.ModelMultipleChoiceField,
        'queryset': self.remote_field.model._default_manager.using(using),
    }
    options.update(kwargs)
    # A provided `initial` is a collection of related objects (or a callable
    # producing one), but the form field expects a list of primary keys.
    initial = options.get('initial')
    if initial is not None:
        if callable(initial):
            initial = initial()
        options['initial'] = [obj._get_pk_val() for obj in initial]
    return super(ManyToManyField, self).formfield(**options)
def db_type(self, connection):
    """Return None: the field has no column of its own."""
    # A ManyToManyField is not represented by a single column,
    # so return None.
    return None
def db_parameters(self, connection):
    # No backing column (see db_type), hence no column type and no check
    # constraint.
    return {"type": None, "check": None}
|
2014cdag2/w17x1 | refs/heads/master | static/Brython3.1.3-20150514-095342/Lib/site-packages/pygame/version.py | 607 | ## pygame - Python Game Library
## Copyright (C) 2000-2003 Pete Shinners
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## pete@shinners.org
"""Simply the current installed pygame version. The version information is
stored in the regular pygame module as 'pygame.ver'. Keeping the version
information also available in a separate module allows you to test the
pygame version without importing the main pygame module.
The python version information should always compare greater than any previous
releases. (hmm, until we get to versions > 10)
"""
# Human-readable version string, exposed as pygame.ver.
ver = '1.8.0pre'

# Numeric (major, minor, patch) triple for ordered comparisons such as
# `vernum >= (1, 8, 0)`; explicit parentheses, same tuple value as before.
vernum = (1, 8, 0)
|
Yen-Chung-En/w16b_test | refs/heads/master | static/Brython3.1.3-20150514-095342/Lib/jqueryui/__init__.py | 603 | """Wrapper around the jQuery UI library
Exposes a single object, jq, to manipulate the widgets designed in the library
This object supports :
- subscription : jq[elt_id] returns an object matching the element with the
specified id
- a method get(**kw). The only keyword currently supported is "selector". The
method returns a list of instances of the class Element, each instance wraps
the elements matching the CSS selector passed
jq(selector="button") : returns instances of Element for all button tags
The value can be a list or tuple of CSS selector strings :
jq(selector=("input[type=submit]","a")) : instances of Element for all
"input" tags with attribute "type" set to "submit" + "a" tags (anchors)
Instances of Element have the same interface as the selections made by the
jQuery function $, with the additional methods provided by jQuery UI. For
instance, to turn an element into a dialog :
jq[elt_id].dialog()
When jQuery UI methods expect a Javascript object, they can be passed as
key/value pairs :
jq['tags'].autocomplete(source=availableTags)
"""
from browser import html, document, window
import javascript

# Directory containing this package; used to locate the bundled CSS and JS.
_path = __file__[:__file__.rfind('/')]+'/'

# Attach the jQuery UI stylesheet to the current page.
document <= html.LINK(rel="stylesheet",
    href=_path+'css/smoothness/jquery-ui.css')

# The scripts must be loaded in blocking mode, by using the function
# load(script_url[, names]) in module javascript
# If we just add them to the document with script tags, eg :
#
#    document <= html.SCRIPT(script_url)
#    _jqui = window.jQuery.noConflict(True)
#
# the name "jQuery" is not in the Javascript namespace until the script is
# fully loaded in the page, so "window.jQuery" raises an exception

# Load jQuery and put name 'jQuery' in the global Javascript namespace
javascript.load(_path+'jquery-1.11.2.js', ['jQuery'])
javascript.load(_path+'jquery-ui.js')

# Detach jQuery from the global namespace again, keeping a private handle.
_jqui = window.jQuery.noConflict(True)
# DOM event names recognised by Element.__getattr__: accessing one of these
# as an attribute returns a binding shortcut, so elt.click(f) behaves like
# elt.bind('click', f).
_events = ['abort',
           'beforeinput',
           'blur',
           'click',
           'compositionstart',
           'compositionupdate',
           'compositionend',
           'dblclick',
           'error',
           'focus',
           'focusin',
           'focusout',
           'input',
           'keydown',
           'keyup',
           'load',
           'mousedown',
           'mouseenter',
           'mouseleave',
           'mousemove',
           'mouseout',
           'mouseover',
           'mouseup',
           'resize',
           'scroll',
           'select',
           'unload']
class JQFunction:
    """Callable wrapper that adapts Python keyword arguments to jQuery UI.

    jQuery UI methods take their options as a single Javascript object, so
    any keyword arguments are bundled into one dict appended after the
    positional arguments.
    """

    def __init__(self, func):
        self.func = func

    def __call__(self, *args, **kw):
        if not kw:
            return self.func(*args)
        # Keyword arguments become a single trailing options mapping.
        return self.func(*args, kw)
class Element:
    """Wrapper around the objects returned by jQuery selections."""

    def __init__(self, item):
        self.item = item

    def bind(self, event, callback):
        """Bind *callback* to *event* on the wrapped element."""
        handler = getattr(self.item, event)
        handler(callback)

    def __getattr__(self, attr):
        # Look the attribute up on the wrapped jQuery object first, so a
        # missing attribute raises AttributeError here, as it would unwrapped.
        value = getattr(self.item, attr)
        if attr in _events:
            # elt.click(f) is handled like elt.bind('click', f)
            return lambda callback: self.bind(attr, callback)
        if callable(value):
            # Wrap jQuery methods so Python keyword args become an options
            # object (see JQFunction).
            value = JQFunction(value)
        return value
class jq:
    """Entry point mimicking the jQuery ``$`` function for Brython."""

    @staticmethod
    def get(**selectors):
        """Return Element wrapper(s) for the given selection keywords.

        Supported keywords:
          selector -- a CSS selector string, or a list/tuple of them;
                      produces a list of Element instances.
          element  -- a DOM element; produces a single Element instance.
        """
        items = []
        for key, value in selectors.items():
            if key == 'selector':
                # Bug fix: isinstance() requires a type or a *tuple* of
                # types as its second argument; the original passed a list
                # ([list, tuple]), which raises TypeError at runtime.
                if isinstance(value, (list, tuple)):
                    values = value
                else:
                    values = [value]
                for sel in values:
                    items.append(Element(_jqui(sel)))
            elif key == 'element':
                # A single element selection replaces the result list.
                items = Element(_jqui(value))
        return items

    @staticmethod
    def __getitem__(element_id):
        # NOTE(review): subscription on the class itself (jq[elt_id], as the
        # module docstring advertises) relies on Brython semantics; in
        # CPython this would require a metaclass — confirm against Brython.
        return jq.get(selector='#'+element_id)[0]
|
dennybaa/st2 | refs/heads/master | st2common/st2common/persistence/keyvalue.py | 4 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2common.persistence.base import Access
from st2common.models.db import keyvalue
from st2common.models.api.keyvalue import KeyValuePairAPI
from st2common.models.system.common import ResourceReference
from st2common.constants.triggers import KEY_VALUE_PAIR_CREATE_TRIGGER
from st2common.constants.triggers import KEY_VALUE_PAIR_UPDATE_TRIGGER
from st2common.constants.triggers import KEY_VALUE_PAIR_VALUE_CHANGE_TRIGGER
from st2common.constants.triggers import KEY_VALUE_PAIR_DELETE_TRIGGER
class KeyValuePair(Access):
    """Persistence/access layer for key-value pair models.

    Besides the standard CRUD triggers handled by the base ``Access`` class,
    this class dispatches a dedicated "value_change" trigger whenever an
    existing pair's value is modified.
    """

    impl = keyvalue.keyvaluepair_access
    publisher = None

    api_model_cls = KeyValuePairAPI
    # Operations for which a trigger is dispatched.
    dispatch_trigger_for_operations = ['create', 'update', 'value_change', 'delete']
    # Maps each operation to the string reference of the trigger to dispatch.
    operation_to_trigger_ref_map = {
        'create': ResourceReference.to_string_reference(
            name=KEY_VALUE_PAIR_CREATE_TRIGGER['name'],
            pack=KEY_VALUE_PAIR_CREATE_TRIGGER['pack']),
        'update': ResourceReference.to_string_reference(
            name=KEY_VALUE_PAIR_UPDATE_TRIGGER['name'],
            pack=KEY_VALUE_PAIR_UPDATE_TRIGGER['pack']),
        'value_change': ResourceReference.to_string_reference(
            name=KEY_VALUE_PAIR_VALUE_CHANGE_TRIGGER['name'],
            pack=KEY_VALUE_PAIR_VALUE_CHANGE_TRIGGER['pack']),
        'delete': ResourceReference.to_string_reference(
            name=KEY_VALUE_PAIR_DELETE_TRIGGER['name'],
            pack=KEY_VALUE_PAIR_DELETE_TRIGGER['pack']),
    }

    @classmethod
    def add_or_update(cls, model_object, publish=True, dispatch_trigger=True):
        """
        Note: We override add_or_update because we also want to publish high level "value_change"
        event for this resource.
        """
        if model_object.id:
            # An existing id means this is an update; fetch the current state
            # so the old and new values can be compared after saving.
            existing_model_object = cls.get_by_id(value=model_object.id)
        else:
            # Not an update
            existing_model_object = None

        model_object = super(KeyValuePair, cls).add_or_update(model_object=model_object,
                                                              publish=publish,
                                                              dispatch_trigger=dispatch_trigger)

        # Dispatch a value_change event which is specific to this resource
        if existing_model_object and existing_model_object.value != model_object.value:
            cls.dispatch_value_change_trigger(old_model_object=existing_model_object,
                                              new_model_object=model_object)

        return model_object

    @classmethod
    def dispatch_value_change_trigger(cls, old_model_object, new_model_object):
        """Dispatch the value_change trigger with masked old/new payloads."""
        operation = 'value_change'
        trigger = cls._get_trigger_ref_for_operation(operation=operation)

        # Secrets are masked before the values are placed on the payload.
        old_object_payload = cls.api_model_cls.from_model(old_model_object,
                                                          mask_secrets=True).__json__()
        new_object_payload = cls.api_model_cls.from_model(new_model_object,
                                                          mask_secrets=True).__json__()
        payload = {
            'old_object': old_object_payload,
            'new_object': new_object_payload
        }

        return cls._dispatch_trigger(operation=operation, trigger=trigger, payload=payload)

    @classmethod
    def _get_impl(cls):
        return cls.impl

    @classmethod
    def _get_by_object(cls, object):
        # For KeyValuePair name is unique.
        name = getattr(object, 'name', '')
        return cls.get_by_name(name)
|
andim27/magiccamp | refs/heads/master | django/contrib/gis/tests/relatedapp/tests.py | 25 | import os, unittest
from django.contrib.gis.geos import *
from django.contrib.gis.db.models import Collect, Count, Extent, F, Union
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.tests.utils import mysql, oracle, postgis, spatialite, no_mysql, no_oracle, no_spatialite
from django.conf import settings
from models import City, Location, DirectoryEntry, Parcel, Book, Author, Article
# (name, state, longitude, latitude) fixtures used by the tests below.
cities = (('Aurora', 'TX', -97.516111, 33.058333),
          ('Roswell', 'NM', -104.528056, 33.387222),
          ('Kecksburg', 'PA', -79.460734, 40.18476),
          )
class RelatedGeoModelTest(unittest.TestCase):
    """Tests for GeoDjango queries that traverse model relations
    (select_related, aggregates, transform and F() expressions over
    geometry fields on related models)."""

    def test01_setup(self):
        "Setting up for related model tests."
        # Create a Location and a City pointing at it for each fixture row.
        for name, state, lon, lat in cities:
            loc = Location.objects.create(point=Point(lon, lat))
            c = City.objects.create(name=name, state=state, location=loc)

    def test02_select_related(self):
        "Testing `select_related` on geographic models (see #7126)."
        qs1 = City.objects.all()
        qs2 = City.objects.select_related()
        qs3 = City.objects.select_related('location')

        # All three querysets must hydrate the related geometry identically.
        for qs in (qs1, qs2, qs3):
            for ref, c in zip(cities, qs):
                nm, st, lon, lat = ref
                self.assertEqual(nm, c.name)
                self.assertEqual(st, c.state)
                self.assertEqual(Point(lon, lat), c.location.point)

    @no_mysql
    def test03_transform_related(self):
        "Testing the `transform` GeoQuerySet method on related geographic models."
        # All the transformations are to state plane coordinate systems using
        # US Survey Feet (thus a tolerance of 0 implies error w/in 1 survey foot).
        tol = 0

        def check_pnt(ref, pnt):
            self.assertAlmostEqual(ref.x, pnt.x, tol)
            self.assertAlmostEqual(ref.y, pnt.y, tol)
            self.assertEqual(ref.srid, pnt.srid)

        # Each city transformed to the SRID of their state plane coordinate system.
        transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'),
                       ('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'),
                       ('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'),
                       )

        for name, srid, wkt in transformed:
            # Doing this implicitly sets `select_related` select the location.
            # TODO: Fix why this breaks on Oracle.
            qs = list(City.objects.filter(name=name).transform(srid, field_name='location__point'))
            check_pnt(GEOSGeometry(wkt, srid), qs[0].location.point)

    @no_mysql
    @no_spatialite
    def test04a_related_extent_aggregate(self):
        "Testing the `extent` GeoQuerySet aggregates on related geographic models."
        # This combines the Extent and Union aggregates into one query
        aggs = City.objects.aggregate(Extent('location__point'))

        # One for all locations, one that excludes Roswell.
        all_extent = (-104.528060913086, 33.0583305358887,-79.4607315063477, 40.1847610473633)
        txpa_extent = (-97.51611328125, 33.0583305358887,-79.4607315063477, 40.1847610473633)
        e1 = City.objects.extent(field_name='location__point')
        e2 = City.objects.exclude(name='Roswell').extent(field_name='location__point')
        e3 = aggs['location__point__extent']

        # The tolerance value is to four decimal places because of differences
        # between the Oracle and PostGIS spatial backends on the extent calculation.
        tol = 4
        for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]:
            for ref_val, e_val in zip(ref, e): self.assertAlmostEqual(ref_val, e_val, tol)

    @no_mysql
    def test04b_related_union_aggregate(self):
        "Testing the `unionagg` GeoQuerySet aggregates on related geographic models."
        # This combines the Extent and Union aggregates into one query
        aggs = City.objects.aggregate(Union('location__point'))

        # These are the points that are components of the aggregate geographic
        # union that is returned.
        p1 = Point(-104.528056, 33.387222)
        p2 = Point(-97.516111, 33.058333)
        p3 = Point(-79.460734, 40.18476)

        # Creating the reference union geometry depending on the spatial backend,
        # as Oracle will have a different internal ordering of the component
        # geometries than PostGIS. The second union aggregate is for a union
        # query that includes limiting information in the WHERE clause (in other
        # words a `.filter()` precedes the call to `.unionagg()`).
        if oracle:
            ref_u1 = MultiPoint(p3, p1, p2, srid=4326)
            ref_u2 = MultiPoint(p3, p2, srid=4326)
        else:
            ref_u1 = MultiPoint(p1, p2, p3, srid=4326)
            ref_u2 = MultiPoint(p2, p3, srid=4326)

        u1 = City.objects.unionagg(field_name='location__point')
        u2 = City.objects.exclude(name='Roswell').unionagg(field_name='location__point')
        u3 = aggs['location__point__union']

        self.assertEqual(ref_u1, u1)
        self.assertEqual(ref_u2, u2)
        self.assertEqual(ref_u1, u3)

    def test05_select_related_fk_to_subclass(self):
        "Testing that calling select_related on a query over a model with an FK to a model subclass works"
        # Regression test for #9752.
        l = list(DirectoryEntry.objects.all().select_related())

    def test06_f_expressions(self):
        "Testing F() expressions on GeometryFields."
        # Constructing a dummy parcel border and getting the City instance for
        # assigning the FK.
        b1 = GEOSGeometry('POLYGON((-97.501205 33.052520,-97.501205 33.052576,-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))', srid=4326)
        pcity = City.objects.get(name='Aurora')

        # First parcel has incorrect center point that is equal to the City;
        # it also has a second border that is different from the first as a
        # 100ft buffer around the City.
        c1 = pcity.location.point
        c2 = c1.transform(2276, clone=True)
        b2 = c2.buffer(100)
        p1 = Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2)

        # Now creating a second Parcel where the borders are the same, just
        # in different coordinate systems. The center points are also
        # the same (but in different coordinate systems), and this time they
        # actually correspond to the centroid of the border.
        c1 = b1.centroid
        c2 = c1.transform(2276, clone=True)
        p2 = Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1)

        # Should return the second Parcel, which has the center within the
        # border.
        qs = Parcel.objects.filter(center1__within=F('border1'))
        self.assertEqual(1, len(qs))
        self.assertEqual('P2', qs[0].name)

        if not mysql:
            # This time center2 is in a different coordinate system and needs
            # to be wrapped in transformation SQL.
            qs = Parcel.objects.filter(center2__within=F('border1'))
            self.assertEqual(1, len(qs))
            self.assertEqual('P2', qs[0].name)

        # Should return the first Parcel, which has the center point equal
        # to the point in the City ForeignKey.
        qs = Parcel.objects.filter(center1=F('city__location__point'))
        self.assertEqual(1, len(qs))
        self.assertEqual('P1', qs[0].name)

        if not mysql:
            # This time the city column should be wrapped in transformation SQL.
            qs = Parcel.objects.filter(border2__contains=F('city__location__point'))
            self.assertEqual(1, len(qs))
            self.assertEqual('P1', qs[0].name)

    def test07_values(self):
        "Testing values() and values_list() and GeoQuerySets."
        # GeoQuerySet and GeoValuesQuerySet, and GeoValuesListQuerySet respectively.
        gqs = Location.objects.all()
        gvqs = Location.objects.values()
        gvlqs = Location.objects.values_list()

        # Incrementing through each of the models, dictionaries, and tuples
        # returned by the different types of GeoQuerySets.
        for m, d, t in zip(gqs, gvqs, gvlqs):
            # The values should be Geometry objects and not raw strings returned
            # by the spatial database.
            self.failUnless(isinstance(d['point'], Geometry))
            self.failUnless(isinstance(t[1], Geometry))
            self.assertEqual(m.point, d['point'])
            self.assertEqual(m.point, t[1])

    def test08_defer_only(self):
        "Testing defer() and only() on Geographic models."
        qs = Location.objects.all()
        def_qs = Location.objects.defer('point')
        # Deferred access must still return the same geometry.
        for loc, def_loc in zip(qs, def_qs):
            self.assertEqual(loc.point, def_loc.point)

    def test09_pk_relations(self):
        "Ensuring correct primary key column is selected across relations. See #10757."
        # Adding two more cities, but this time making sure that their location
        # ID values do not match their City ID values.
        loc1 = Location.objects.create(point='POINT (-95.363151 29.763374)')
        loc2 = Location.objects.create(point='POINT (-96.801611 32.782057)')
        dallas = City.objects.create(name='Dallas', state='TX', location=loc2)
        houston = City.objects.create(name='Houston', state='TX', location=loc1)

        # The expected ID values -- notice the last two location IDs
        # are out of order. We want to make sure that the related
        # location ID column is selected instead of ID column for
        # the city.
        city_ids = (1, 2, 3, 4, 5)
        loc_ids = (1, 2, 3, 5, 4)
        ids_qs = City.objects.order_by('id').values('id', 'location__id')
        for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids):
            self.assertEqual(val_dict['id'], c_id)
            self.assertEqual(val_dict['location__id'], l_id)

    def test10_combine(self):
        "Testing the combination of two GeoQuerySets. See #10807."
        buf1 = City.objects.get(name='Aurora').location.point.buffer(0.1)
        buf2 = City.objects.get(name='Kecksburg').location.point.buffer(0.1)
        qs1 = City.objects.filter(location__point__within=buf1)
        qs2 = City.objects.filter(location__point__within=buf2)
        combined = qs1 | qs2
        names = [c.name for c in combined]
        self.assertEqual(2, len(names))
        self.failUnless('Aurora' in names)
        self.failUnless('Kecksburg' in names)

    def test11_geoquery_pickle(self):
        "Ensuring GeoQuery objects are unpickled correctly. See #10839."
        import pickle
        from django.contrib.gis.db.models.sql import GeoQuery
        qs = City.objects.all()
        q_str = pickle.dumps(qs.query)
        q = pickle.loads(q_str)
        self.assertEqual(GeoQuery, q.__class__)

    # TODO: fix on Oracle -- get the following error because the SQL is ordered
    # by a geometry object, which Oracle apparently doesn't like:
    #  ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
    @no_oracle
    def test12a_count(self):
        "Testing `Count` aggregate use with the `GeoManager` on geo-fields."
        # Creating a new City, 'Fort Worth', that uses the same location
        # as Dallas.
        dallas = City.objects.get(name='Dallas')
        ftworth = City.objects.create(name='Fort Worth', state='TX', location=dallas.location)

        # Count annotation should be 2 for the Dallas location now.
        loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id)
        self.assertEqual(2, loc.num_cities)

    def test12b_count(self):
        "Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087."
        # Creating some data for the Book/Author non-geo models that
        # use GeoManager. See #11087.
        tp = Author.objects.create(name='Trevor Paglen')
        Book.objects.create(title='Torture Taxi', author=tp)
        Book.objects.create(title='I Could Tell You But Then You Would Have to be Destroyed by Me', author=tp)
        Book.objects.create(title='Blank Spots on the Map', author=tp)
        wp = Author.objects.create(name='William Patry')
        Book.objects.create(title='Patry on Copyright', author=wp)

        # Should only be one author (Trevor Paglen) returned by this query, and
        # the annotation should have 3 for the number of books. Also testing
        # with a `GeoValuesQuerySet` (see #11489).
        qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1)
        vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1)
        self.assertEqual(1, len(qs))
        self.assertEqual(3, qs[0].num_books)
        self.assertEqual(1, len(vqs))
        self.assertEqual(3, vqs[0]['num_books'])

    # TODO: The phantom model does appear on Oracle.
    @no_oracle
    def test13_select_related_null_fk(self):
        "Testing `select_related` on a nullable ForeignKey via `GeoManager`. See #11381."
        no_author = Book.objects.create(title='Without Author')
        b = Book.objects.select_related('author').get(title='Without Author')
        # Should be `None`, and not a 'dummy' model.
        self.assertEqual(None, b.author)

    @no_mysql
    @no_oracle
    @no_spatialite
    def test14_collect(self):
        "Testing the `collect` GeoQuerySet method and `Collect` aggregate."
        # Reference query:
        # SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN
        #    "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id")
        #    WHERE "relatedapp_city"."state" = 'TX';
        ref_geom = fromstr('MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,-95.363151 29.763374,-96.801611 32.782057)')

        c1 = City.objects.filter(state='TX').collect(field_name='location__point')
        c2 = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect']

        for coll in (c1, c2):
            # Even though Dallas and Ft. Worth share same point, Collect doesn't
            # consolidate -- that's why 4 points in MultiPoint.
            self.assertEqual(4, len(coll))
            self.assertEqual(ref_geom, coll)

    def test15_invalid_select_related(self):
        "Testing doing select_related on the related name manager of a unique FK. See #13934."
        qs = Article.objects.select_related('author__article')
        # This triggers TypeError when `get_default_columns` has no `local_only`
        # keyword. The TypeError is swallowed if QuerySet is actually
        # evaluated as list generation swallows TypeError in CPython.
        sql = str(qs.query)
# TODO: Related tests for KML, GML, and distance lookups.
def suite():
    """Return a TestSuite containing all RelatedGeoModelTest cases."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(RelatedGeoModelTest))
    return tests
|
v4l3r10/goingxincoin | refs/heads/master | share/qt/make_spinner.py | 4415 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)
im_src = Image.open(SRC)
# Pre-flip the source so the final animation appears to rotate clockwise.
if CLOCKWISE:
    im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)

def frame_to_filename(frame):
    # Temporary PNG path for a single animation frame.
    return path.join(TMPDIR, TMPNAME % frame)

frame_files = []
# NOTE: xrange — this script targets Python 2.
for frame in xrange(NUMFRAMES):
    # Sample the rotation at the centre of each frame's interval.
    rotation = (frame + 0.5) / NUMFRAMES * 360.0
    if CLOCKWISE:
        rotation = -rotation
    im_new = im_src.rotate(rotation, Image.BICUBIC)
    im_new.thumbnail(DSIZE, Image.ANTIALIAS)
    outfile = frame_to_filename(frame)
    im_new.save(outfile, 'png')
    frame_files.append(outfile)

# Assemble the frames into the MNG animation with ImageMagick's convert.
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
|
plotly/python-api | refs/heads/master | packages/python/plotly/plotly/validators/isosurface/hoverlabel/_namelengthsrc.py | 1 | import _plotly_utils.basevalidators
class NamelengthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `isosurface.hoverlabel.namelengthsrc` property
    (a source reference for `namelength`); generated plotly boilerplate."""

    def __init__(
        self, plotly_name="namelengthsrc", parent_name="isosurface.hoverlabel", **kwargs
    ):
        super(NamelengthsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Defaults may be overridden by the code generator via kwargs.
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
|
jazzabeanie/python_koans | refs/heads/master | python2/koans/about_new_style_classes.py | 81 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutNewStyleClasses(Koan):
    """Koan exercises contrasting old-style and new-style classes.

    The ``__`` / ``____`` placeholders are intentional blanks for the
    student to fill in; they must not be replaced here.
    """

    class OldStyleClass:
        "An old style class"
        # Original class style has been phased out in Python 3.

    class NewStyleClass(object):
        "A new style class"
        # Introduced in Python 2.2
        #
        # Aside from this set of tests, Python Koans sticks exclusively to this
        # kind of class
        pass

    def test_new_style_classes_inherit_from_object_base_class(self):
        self.assertEqual(____, issubclass(self.NewStyleClass, object))
        self.assertEqual(____, issubclass(self.OldStyleClass, object))

    def test_new_style_classes_have_more_attributes(self):
        self.assertEqual(__, len(dir(self.OldStyleClass)))
        self.assertEqual(__, self.OldStyleClass.__doc__)
        self.assertEqual(__, self.OldStyleClass.__module__)

        self.assertEqual(__, len(dir(self.NewStyleClass)))
        # To examine the available attributes, run
        # 'dir(<Class name goes here>)'
        # from a python console

    # ------------------------------------------------------------------

    def test_old_style_classes_have_type_but_no_class_attribute(self):
        self.assertEqual(__, type(self.OldStyleClass).__name__)

        try:
            cls = self.OldStyleClass.__class__.__name__
        except Exception as ex:
            pass

        # What was that error message from the exception?
        self.assertMatch(__, ex[0])

    def test_new_style_classes_have_same_class_as_type(self):
        new_style = self.NewStyleClass()
        self.assertEqual(__, self.NewStyleClass.__class__)
        self.assertEqual(
            __,
            type(self.NewStyleClass) == self.NewStyleClass.__class__)

    # ------------------------------------------------------------------

    def test_in_old_style_instances_class_is_different_to_type(self):
        old_style = self.OldStyleClass()
        self.assertEqual(__, old_style.__class__.__name__)
        self.assertEqual(__, type(old_style).__name__)

    def test_new_style_instances_have_same_class_as_type(self):
        new_style = self.NewStyleClass()
        self.assertEqual(__, new_style.__class__.__name__)
        self.assertEqual(__, type(new_style) == new_style.__class__)
Venturi/cms | refs/heads/master | env/lib/python2.7/site-packages/unidecode/x06c.py | 252 | data = (
'Lu ', # 0x00
'Mu ', # 0x01
'Li ', # 0x02
'Tong ', # 0x03
'Rong ', # 0x04
'Chang ', # 0x05
'Pu ', # 0x06
'Luo ', # 0x07
'Zhan ', # 0x08
'Sao ', # 0x09
'Zhan ', # 0x0a
'Meng ', # 0x0b
'Luo ', # 0x0c
'Qu ', # 0x0d
'Die ', # 0x0e
'Shi ', # 0x0f
'Di ', # 0x10
'Min ', # 0x11
'Jue ', # 0x12
'Mang ', # 0x13
'Qi ', # 0x14
'Pie ', # 0x15
'Nai ', # 0x16
'Qi ', # 0x17
'Dao ', # 0x18
'Xian ', # 0x19
'Chuan ', # 0x1a
'Fen ', # 0x1b
'Ri ', # 0x1c
'Nei ', # 0x1d
'[?] ', # 0x1e
'Fu ', # 0x1f
'Shen ', # 0x20
'Dong ', # 0x21
'Qing ', # 0x22
'Qi ', # 0x23
'Yin ', # 0x24
'Xi ', # 0x25
'Hai ', # 0x26
'Yang ', # 0x27
'An ', # 0x28
'Ya ', # 0x29
'Ke ', # 0x2a
'Qing ', # 0x2b
'Ya ', # 0x2c
'Dong ', # 0x2d
'Dan ', # 0x2e
'Lu ', # 0x2f
'Qing ', # 0x30
'Yang ', # 0x31
'Yun ', # 0x32
'Yun ', # 0x33
'Shui ', # 0x34
'San ', # 0x35
'Zheng ', # 0x36
'Bing ', # 0x37
'Yong ', # 0x38
'Dang ', # 0x39
'Shitamizu ', # 0x3a
'Le ', # 0x3b
'Ni ', # 0x3c
'Tun ', # 0x3d
'Fan ', # 0x3e
'Gui ', # 0x3f
'Ting ', # 0x40
'Zhi ', # 0x41
'Qiu ', # 0x42
'Bin ', # 0x43
'Ze ', # 0x44
'Mian ', # 0x45
'Cuan ', # 0x46
'Hui ', # 0x47
'Diao ', # 0x48
'Yi ', # 0x49
'Cha ', # 0x4a
'Zhuo ', # 0x4b
'Chuan ', # 0x4c
'Wan ', # 0x4d
'Fan ', # 0x4e
'Dai ', # 0x4f
'Xi ', # 0x50
'Tuo ', # 0x51
'Mang ', # 0x52
'Qiu ', # 0x53
'Qi ', # 0x54
'Shan ', # 0x55
'Pai ', # 0x56
'Han ', # 0x57
'Qian ', # 0x58
'Wu ', # 0x59
'Wu ', # 0x5a
'Xun ', # 0x5b
'Si ', # 0x5c
'Ru ', # 0x5d
'Gong ', # 0x5e
'Jiang ', # 0x5f
'Chi ', # 0x60
'Wu ', # 0x61
'Tsuchi ', # 0x62
'[?] ', # 0x63
'Tang ', # 0x64
'Zhi ', # 0x65
'Chi ', # 0x66
'Qian ', # 0x67
'Mi ', # 0x68
'Yu ', # 0x69
'Wang ', # 0x6a
'Qing ', # 0x6b
'Jing ', # 0x6c
'Rui ', # 0x6d
'Jun ', # 0x6e
'Hong ', # 0x6f
'Tai ', # 0x70
'Quan ', # 0x71
'Ji ', # 0x72
'Bian ', # 0x73
'Bian ', # 0x74
'Gan ', # 0x75
'Wen ', # 0x76
'Zhong ', # 0x77
'Fang ', # 0x78
'Xiong ', # 0x79
'Jue ', # 0x7a
'Hang ', # 0x7b
'Niou ', # 0x7c
'Qi ', # 0x7d
'Fen ', # 0x7e
'Xu ', # 0x7f
'Xu ', # 0x80
'Qin ', # 0x81
'Yi ', # 0x82
'Wo ', # 0x83
'Yun ', # 0x84
'Yuan ', # 0x85
'Hang ', # 0x86
'Yan ', # 0x87
'Chen ', # 0x88
'Chen ', # 0x89
'Dan ', # 0x8a
'You ', # 0x8b
'Dun ', # 0x8c
'Hu ', # 0x8d
'Huo ', # 0x8e
'Qie ', # 0x8f
'Mu ', # 0x90
'Rou ', # 0x91
'Mei ', # 0x92
'Ta ', # 0x93
'Mian ', # 0x94
'Wu ', # 0x95
'Chong ', # 0x96
'Tian ', # 0x97
'Bi ', # 0x98
'Sha ', # 0x99
'Zhi ', # 0x9a
'Pei ', # 0x9b
'Pan ', # 0x9c
'Zhui ', # 0x9d
'Za ', # 0x9e
'Gou ', # 0x9f
'Liu ', # 0xa0
'Mei ', # 0xa1
'Ze ', # 0xa2
'Feng ', # 0xa3
'Ou ', # 0xa4
'Li ', # 0xa5
'Lun ', # 0xa6
'Cang ', # 0xa7
'Feng ', # 0xa8
'Wei ', # 0xa9
'Hu ', # 0xaa
'Mo ', # 0xab
'Mei ', # 0xac
'Shu ', # 0xad
'Ju ', # 0xae
'Zan ', # 0xaf
'Tuo ', # 0xb0
'Tuo ', # 0xb1
'Tuo ', # 0xb2
'He ', # 0xb3
'Li ', # 0xb4
'Mi ', # 0xb5
'Yi ', # 0xb6
'Fa ', # 0xb7
'Fei ', # 0xb8
'You ', # 0xb9
'Tian ', # 0xba
'Zhi ', # 0xbb
'Zhao ', # 0xbc
'Gu ', # 0xbd
'Zhan ', # 0xbe
'Yan ', # 0xbf
'Si ', # 0xc0
'Kuang ', # 0xc1
'Jiong ', # 0xc2
'Ju ', # 0xc3
'Xie ', # 0xc4
'Qiu ', # 0xc5
'Yi ', # 0xc6
'Jia ', # 0xc7
'Zhong ', # 0xc8
'Quan ', # 0xc9
'Bo ', # 0xca
'Hui ', # 0xcb
'Mi ', # 0xcc
'Ben ', # 0xcd
'Zhuo ', # 0xce
'Chu ', # 0xcf
'Le ', # 0xd0
'You ', # 0xd1
'Gu ', # 0xd2
'Hong ', # 0xd3
'Gan ', # 0xd4
'Fa ', # 0xd5
'Mao ', # 0xd6
'Si ', # 0xd7
'Hu ', # 0xd8
'Ping ', # 0xd9
'Ci ', # 0xda
'Fan ', # 0xdb
'Chi ', # 0xdc
'Su ', # 0xdd
'Ning ', # 0xde
'Cheng ', # 0xdf
'Ling ', # 0xe0
'Pao ', # 0xe1
'Bo ', # 0xe2
'Qi ', # 0xe3
'Si ', # 0xe4
'Ni ', # 0xe5
'Ju ', # 0xe6
'Yue ', # 0xe7
'Zhu ', # 0xe8
'Sheng ', # 0xe9
'Lei ', # 0xea
'Xuan ', # 0xeb
'Xue ', # 0xec
'Fu ', # 0xed
'Pan ', # 0xee
'Min ', # 0xef
'Tai ', # 0xf0
'Yang ', # 0xf1
'Ji ', # 0xf2
'Yong ', # 0xf3
'Guan ', # 0xf4
'Beng ', # 0xf5
'Xue ', # 0xf6
'Long ', # 0xf7
'Lu ', # 0xf8
'[?] ', # 0xf9
'Bo ', # 0xfa
'Xie ', # 0xfb
'Po ', # 0xfc
'Ze ', # 0xfd
'Jing ', # 0xfe
'Yin ', # 0xff
)
|
aprefontaine/TMScheduler | refs/heads/master | tests/modeltests/m2m_recursive/models.py | 18 | """
28. Many-to-many relationships between the same two tables
In this example, a ``Person`` can have many friends, who are also ``Person``
objects. Friendship is a symmetrical relationship - if I am your friend, you
are my friend. Here, ``friends`` is an example of a symmetrical
``ManyToManyField``.
A ``Person`` can also have many idols - but while I may idolize you, you may
not think the same of me. Here, ``idols`` is an example of a non-symmetrical
``ManyToManyField``. Only recursive ``ManyToManyField`` fields may be
non-symmetrical, and they are symmetrical by default.
This test validates that the many-to-many table is created using a mangled name
if there is a name clash, and tests that symmetry is preserved where
appropriate.
"""
from django.db import models
class Person(models.Model):
    # Display name for the person.
    name = models.CharField(max_length=20)
    # Symmetrical recursive m2m: if A is B's friend, B is automatically A's.
    friends = models.ManyToManyField('self')
    # Non-symmetrical recursive m2m; the reverse accessor is `stalkers`.
    idols = models.ManyToManyField('self', symmetrical=False, related_name='stalkers')

    def __unicode__(self):
        return self.name
# Doctest suite for recursive M2M behaviour. Fixes: the second
# "Who is stalking Anne?" check was missing its ">>> a.stalkers.all()"
# statement, so doctest treated the expected output as inert prose and the
# assertion was silently skipped; one comment said "friends" where it meant
# "stalking".
__test__ = {'API_TESTS':"""
>>> a = Person(name='Anne')
>>> a.save()
>>> b = Person(name='Bill')
>>> b.save()
>>> c = Person(name='Chuck')
>>> c.save()
>>> d = Person(name='David')
>>> d.save()
# Add some friends in the direction of field definition
# Anne is friends with Bill and Chuck
>>> a.friends.add(b,c)
# David is friends with Anne and Chuck - add in reverse direction
>>> d.friends.add(a,c)
# Who is friends with Anne?
>>> a.friends.all()
[<Person: Bill>, <Person: Chuck>, <Person: David>]
# Who is friends with Bill?
>>> b.friends.all()
[<Person: Anne>]
# Who is friends with Chuck?
>>> c.friends.all()
[<Person: Anne>, <Person: David>]
# Who is friends with David?
>>> d.friends.all()
[<Person: Anne>, <Person: Chuck>]
# Bill is already friends with Anne - add Anne again, but in the reverse direction
>>> b.friends.add(a)
# Who is friends with Anne?
>>> a.friends.all()
[<Person: Bill>, <Person: Chuck>, <Person: David>]
# Who is friends with Bill?
>>> b.friends.all()
[<Person: Anne>]
# Remove Anne from Bill's friends
>>> b.friends.remove(a)
# Who is friends with Anne?
>>> a.friends.all()
[<Person: Chuck>, <Person: David>]
# Who is friends with Bill?
>>> b.friends.all()
[]
# Clear Anne's group of friends
>>> a.friends.clear()
# Who is friends with Anne?
>>> a.friends.all()
[]
# Reverse relationships should also be gone
# Who is friends with Chuck?
>>> c.friends.all()
[<Person: David>]
# Who is friends with David?
>>> d.friends.all()
[<Person: Chuck>]
# Add some idols in the direction of field definition
# Anne idolizes Bill and Chuck
>>> a.idols.add(b,c)
# Bill idolizes Anne right back
>>> b.idols.add(a)
# David is idolized by Anne and Chuck - add in reverse direction
>>> d.stalkers.add(a,c)
# Who are Anne's idols?
>>> a.idols.all()
[<Person: Bill>, <Person: Chuck>, <Person: David>]
# Who is stalking Anne?
>>> a.stalkers.all()
[<Person: Bill>]
# Who are Bill's idols?
>>> b.idols.all()
[<Person: Anne>]
# Who is stalking Bill?
>>> b.stalkers.all()
[<Person: Anne>]
# Who are Chuck's idols?
>>> c.idols.all()
[<Person: David>]
# Who is stalking Chuck?
>>> c.stalkers.all()
[<Person: Anne>]
# Who are David's idols?
>>> d.idols.all()
[]
# Who is stalking David
>>> d.stalkers.all()
[<Person: Anne>, <Person: Chuck>]
# Bill is already being stalked by Anne - add Anne again, but in the reverse direction
>>> b.stalkers.add(a)
# Who are Anne's idols?
>>> a.idols.all()
[<Person: Bill>, <Person: Chuck>, <Person: David>]
# Who is stalking Anne?
>>> a.stalkers.all()
[<Person: Bill>]
# Who are Bill's idols
>>> b.idols.all()
[<Person: Anne>]
# Who is stalking Bill?
>>> b.stalkers.all()
[<Person: Anne>]
# Remove Anne from Bill's list of stalkers
>>> b.stalkers.remove(a)
# Who are Anne's idols?
>>> a.idols.all()
[<Person: Chuck>, <Person: David>]
# Who is stalking Anne?
>>> a.stalkers.all()
[<Person: Bill>]
# Who are Bill's idols?
>>> b.idols.all()
[<Person: Anne>]
# Who is stalking Bill?
>>> b.stalkers.all()
[]
# Clear Anne's group of idols
>>> a.idols.clear()
# Who are Anne's idols
>>> a.idols.all()
[]
# Reverse relationships should also be gone
# Who is stalking Chuck?
>>> c.stalkers.all()
[]
# Who is stalking David?
>>> d.stalkers.all()
[<Person: Chuck>]
"""}
|
steinam/teacher | refs/heads/master | jup_notebooks/data-science-ipython-notebooks-master/scikit-learn/fig_code/linear_regression.py | 63 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
def plot_linear_regression():
    """Fit a 1-D linear regression to noisy synthetic data and plot it.

    Draws a scatter of the noisy samples plus the fitted line on the
    current matplotlib axes (call ``plt.show()`` afterwards to display).
    """
    slope = 0.5
    intercept = 1.0
    # 20 random sample positions in [0, 30)
    xs = 30 * np.random.random(20)
    # y = slope*x + intercept, perturbed with unit Gaussian noise
    ys = slope * xs + intercept + np.random.normal(size=xs.shape)
    # fit a least-squares line; sklearn expects a 2-D feature matrix,
    # hence the [:, None] column reshape
    model = LinearRegression()
    model.fit(xs[:, None], ys)
    # evaluate the fitted model on a dense grid for a smooth line
    grid = np.linspace(0, 30, 100)
    predicted = model.predict(grid[:, None])
    # render scatter + fitted line
    axes = plt.axes()
    axes.scatter(xs, ys)
    axes.plot(grid, predicted)
    axes.set_xlabel('x')
    axes.set_ylabel('y')
    axes.axis('tight')
if __name__ == '__main__':
    # Render the demo figure only when executed as a script.
    plot_linear_regression()
    plt.show()
|
camptocamp/ngo-addons-backport | refs/heads/master | addons/process/__openerp__.py | 65 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Enterprise Process',
    'version': '1.0',
    'category': 'Hidden/Dependency',
    'description': """
This module shows the basic processes involved in the selected modules and in the sequence they occur.
======================================================================================================
**Note:** This applies to the modules containing modulename_process.xml.
**e.g.** product/process/product_process.xml.
    """,
    'author': 'OpenERP SA',
    'website': 'http://www.openerp.com',
    'depends': ['web'],
    'data': [
        'security/ir.model.access.csv',
        'process_view.xml'
    ],
    'demo': [],
    'installable': True,
    'images': ['images/process_nodes.jpeg','images/process_transitions.jpeg', 'images/processes.jpeg'],
    'js': [
        'static/src/js/process.js'
    ],
    'css': [
        'static/src/css/process.css'
    ],
    'qweb': [
        'static/src/xml/*.xml'
    ],
    # BUG FIX: 'auto_install' was listed twice (False near the top, True at the
    # end). In a Python dict literal the last duplicate key silently wins, so
    # the effective value was True; keep the single effective entry.
    'auto_install': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
sjev/ibpy | refs/heads/master | ib/sym/__init__.py | 15 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def durationMethod(k):
    """Build a classmethod that renders a value with the unit suffix *k*.

    E.g. ``durationMethod('S')`` yields a classmethod mapping ``30`` to
    ``'30 S'`` (the TWS historical-data duration string format).
    """
    def formatter(cls, val):
        return '%s %s' % (val, k)
    return classmethod(formatter)
class HDDuration:
    # Historical-data duration strings, e.g. HDDuration.days(3) -> '3 D'.
    seconds = durationMethod('S')
    days = durationMethod('D')
    weeks = durationMethod('W')
    months = durationMethod('M')
    years = durationMethod('Y')
class HDBar:
    # Bar-size strings for historical data requests.
    sec = sec1 = '1 sec'
    sec5 = '5 secs'
    sec15 = '15 secs'
    sec30 = '30 secs'
    min1 = '1 min'
    min2 = '2 mins'
    min5 = '5 mins'
    min15 = '15 mins'
    min30 = '30 mins'
    hour = hour1 = '1 hour'
    day = day1 = '1 day'
    week = week1 = '1 week'
    month = month1 = '1 month'
    month3 = '3 months'
    year = year1 = '1 year'
class HDShow:
    # 'whatToShow' values for historical data requests.
    trades = 'TRADES'
    mid = 'MIDPOINT'
    bid = 'BID'
    ask = 'ASK'
    bid_ask = 'BID/ASK'
class HDDateFormat:
    # 'formatDate' argument values for historical data.
    long = 1 # yyyymmdd{space}{space}hh:mm:dd
    short = 2 # 1/1/1970 GMT
class YesNo:
    # Integer boolean flags; several API parameters reuse these aliases.
    no = false = 0
    yes = true = 1
class RTH(YesNo):
    # Regular-trading-hours-only flag.
    pass
class AllOrNone(YesNo):
    # All-or-none order flag.
    pass
class Override(YesNo):
    # Override flag (e.g. for option exercise).
    pass
class FirmQuoteOnly(YesNo):
    # Firm-quote-only order flag.
    pass
class ETradeOnly(YesNo):
    # Electronic-trade-only order flag.
    pass
class ContinuousUpdate(YesNo):
    # Continuous-update flag for volatility orders.
    pass
class AuctionStrategy:
    # BOX auction order strategies.
    match = 1
    improvement = 2
    transparent = 3
class ServerLogLevel:
    # TWS server log verbosity levels (1=least .. 5=most verbose).
    system, error, warning, information, detail = \
        sys, err, warn, info, det = range(1, 6)
class FaDataType:
    # Financial-advisor configuration data types.
    groups, profile, account_aliases = range(1, 4)
class ExerciseAction:
    # Option exercise actions.
    exercise, lapse = range(1, 3)
class TriggerMethod:
    # Stop-order trigger methods.
    default = 0
    double_askbid = 1
    last = 2
    double_last = 3
class ShortSaleSlot:
    # Short-sale slot designations for institutional orders.
    unapplicable = 0
    clearing_broker = 1
    third_party = 2
class OcaType:
    # One-cancels-all group behaviour on partial fills/blocks.
    cancel_on_fill_block = 1
    reduce_on_fill_block = 2
    reduce_on_fill_noblock = 3
class Rule80a:
    # Rule 80A order origin codes.
    individual = 'I'
    agency = 'A'
    agent_other_member = 'W'
    individual_ptia = 'J'
    agency_ptia = 'U'
    agent_other_member_ptia = 'M'
    individual_pt = 'K'
    agency_pt = 'Y'
    agent_other_member_pt = 'N'
class RefPriceType:
    # Reference price types for volatility orders.
    avg = 1
    bidask = 2
class VolatilityType:
    # Volatility period types for volatility orders.
    daily = 1
    annual = 2
class GenericTickTypes:
    # Generic tick type ids for market data subscriptions.
    option_volume = 100
    option_open_interest = 101
    historical_volatility = 104
    option_implied_volatility = 106
    index_future_premium = 162
    misc_stats = 165
    mark_price = 221
    auction_values = 225
    shortable = 236
class TickValues:
    # Tick field ids delivered in market data callbacks.
    low_13_week = 15
    high_13_week = 16
    low_26_week = 17
    high_26_week = 18
    low_52_week = 19
    high_52_week = 20
    avg_volume = 21
    option_historical_vol = 23
    option_implied_vol = 24
    option_call_open_interest = 27
    option_put_open_interest = 28
    option_call_volume = 29
    option_put_volume = 30
    index_future_premium = 31
    auction_volume = 34
    auction_price = 35
    auction_imbalance = 36
    mark_price = 37
    shortable = 46
|
longman694/youtube-dl | refs/heads/mod | youtube_dl/extractor/esri.py | 64 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
int_or_none,
parse_filesize,
unified_strdate,
)
class EsriVideoIE(InfoExtractor):
    # Extractor for videos hosted on video.esri.com.
    _VALID_URL = r'https?://video\.esri\.com/watch/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'https://video.esri.com/watch/1124/arcgis-online-_dash_-developing-applications',
        'md5': 'd4aaf1408b221f1b38227a9bbaeb95bc',
        'info_dict': {
            'id': '1124',
            'ext': 'mp4',
            'title': 'ArcGIS Online - Developing Applications',
            'description': 'Jeremy Bartley demonstrates how to develop applications with ArcGIS Online.',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 185,
            'upload_date': '20120419',
        }
    }
    def _real_extract(self, url):
        """Scrape the watch page and assemble the info dict with all formats."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        formats = []
        # The page lists each resolution as <li><strong>WxH:</strong>...</li>,
        # with one <a> per container whose text is "EXT (SIZE)".
        for width, height, content in re.findall(
                r'(?s)<li><strong>(\d+)x(\d+):</strong>(.+?)</li>', webpage):
            for video_url, ext, filesize in re.findall(
                    r'<a[^>]+href="([^"]+)">([^<]+) \(([^<]+)\)</a>', content):
                formats.append({
                    'url': compat_urlparse.urljoin(url, video_url),  # links may be relative
                    'ext': ext.lower(),
                    'format_id': '%s-%s' % (ext.lower(), height),
                    'width': int(width),
                    'height': int(height),
                    'filesize_approx': parse_filesize(filesize),
                })
        self._sort_formats(formats)
        title = self._html_search_meta('title', webpage, 'title')
        description = self._html_search_meta(
            'description', webpage, 'description', fatal=False)
        thumbnail = self._html_search_meta('thumbnail', webpage, 'thumbnail', fatal=False)
        if thumbnail:
            # Swap the small/thumb suffix (_s/_t) for the large variant (_x).
            thumbnail = re.sub(r'_[st]\.jpg$', '_x.jpg', thumbnail)
        duration = int_or_none(self._search_regex(
            [r'var\s+videoSeconds\s*=\s*(\d+)', r"'duration'\s*:\s*(\d+)"],
            webpage, 'duration', fatal=False))
        upload_date = unified_strdate(self._html_search_meta(
            'last-modified', webpage, 'upload date', fatal=False))
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'upload_date': upload_date,
            'formats': formats
        }
|
NL66278/OCB | refs/heads/8.0 | addons/analytic/analytic.py | 110 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class account_analytic_account(osv.osv):
    """Analytic account / contract.

    Hierarchical cost-accounting object. Debit/credit/balance/quantity are
    function fields aggregated over the whole subtree of child accounts,
    converting amounts between currencies where children differ.
    Fix applied: ``context == None`` replaced by the identity test
    ``context is None`` (PEP 8; behaviour unchanged).
    """
    _name = 'account.analytic.account'
    _inherit = ['mail.thread']
    _description = 'Analytic Account'
    # Chatter subtypes posted automatically when 'state' changes.
    _track = {
        'state': {
            'analytic.mt_account_pending': lambda self, cr, uid, obj, ctx=None: obj.state == 'pending',
            'analytic.mt_account_closed': lambda self, cr, uid, obj, ctx=None: obj.state == 'close',
            'analytic.mt_account_opened': lambda self, cr, uid, obj, ctx=None: obj.state == 'open',
        },
    }
    def _compute_level_tree(self, cr, uid, ids, child_ids, res, field_names, context=None):
        """Roll the per-account figures in `res` up the child hierarchy.

        Amount fields are converted to the parent's currency; 'quantity'
        is summed without conversion."""
        currency_obj = self.pool.get('res.currency')
        recres = {}
        def recursive_computation(account):
            result2 = res[account.id].copy()
            for son in account.child_ids:
                result = recursive_computation(son)
                for field in field_names:
                    if (account.currency_id.id != son.currency_id.id) and (field!='quantity'):
                        result[field] = currency_obj.compute(cr, uid, son.currency_id.id, account.currency_id.id, result[field], context=context)
                    result2[field] += result[field]
            return result2
        for account in self.browse(cr, uid, ids, context=context):
            if account.id not in child_ids:
                continue
            recres[account.id] = recursive_computation(account)
        return recres
    def _debit_credit_bal_qtty(self, cr, uid, ids, fields, arg, context=None):
        """Function field: compute debit/credit/balance/quantity for each
        account's subtree in a single SQL pass, optionally restricted by
        context['from_date'] / context['to_date']."""
        res = {}
        if context is None:
            context = {}
        child_ids = tuple(self.search(cr, uid, [('parent_id', 'child_of', ids)]))
        for i in child_ids:
            res[i] = {}
            for n in fields:
                res[i][n] = 0.0
        if not child_ids:
            return res
        where_date = ''
        where_clause_args = [tuple(child_ids)]
        if context.get('from_date', False):
            where_date += " AND l.date >= %s"
            where_clause_args += [context['from_date']]
        if context.get('to_date', False):
            where_date += " AND l.date <= %s"
            where_clause_args += [context['to_date']]
        cr.execute("""
              SELECT a.id,
                     sum(
                         CASE WHEN l.amount > 0
                         THEN l.amount
                         ELSE 0.0
                         END
                          ) as debit,
                     sum(
                         CASE WHEN l.amount < 0
                         THEN -l.amount
                         ELSE 0.0
                         END
                          ) as credit,
                     COALESCE(SUM(l.amount),0) AS balance,
                     COALESCE(SUM(l.unit_amount),0) AS quantity
              FROM account_analytic_account a
                  LEFT JOIN account_analytic_line l ON (a.id = l.account_id)
              WHERE a.id IN %s
              """ + where_date + """
              GROUP BY a.id""", where_clause_args)
        for row in cr.dictfetchall():
            res[row['id']] = {}
            for field in fields:
                res[row['id']][field] = row[field]
        return self._compute_level_tree(cr, uid, ids, child_ids, res, fields, context)
    def name_get(self, cr, uid, ids, context=None):
        """Display name: slash-separated ancestry path (see _get_one_full_name)."""
        res = []
        if not ids:
            return res
        if isinstance(ids, (int, long)):
            ids = [ids]
        for id in ids:
            elmt = self.browse(cr, uid, id, context=context)
            res.append((id, self._get_one_full_name(elmt)))
        return res
    def _get_full_name(self, cr, uid, ids, name=None, args=None, context=None):
        # FIX: was 'context == None'; None must be compared by identity.
        if context is None:
            context = {}
        res = {}
        for elmt in self.browse(cr, uid, ids, context=context):
            res[elmt.id] = self._get_one_full_name(elmt)
        return res
    def _get_one_full_name(self, elmt, level=6):
        # Cap the displayed ancestry depth; deeper paths are elided with '...'.
        if level<=0:
            return '...'
        if elmt.parent_id and not elmt.type == 'template':
            parent_path = self._get_one_full_name(elmt.parent_id, level-1) + " / "
        else:
            parent_path = ''
        return parent_path + elmt.name
    def _child_compute(self, cr, uid, ids, name, arg, context=None):
        # Children excluding template accounts (for the hierarchy widget).
        result = {}
        if context is None:
            context = {}
        for account in self.browse(cr, uid, ids, context=context):
            result[account.id] = map(lambda x: x.id, [child for child in account.child_ids if child.state != 'template'])
        return result
    def _get_analytic_account(self, cr, uid, ids, context=None):
        # store= trigger: analytic accounts to recompute when a company's
        # currency changes.
        company_obj = self.pool.get('res.company')
        analytic_obj = self.pool.get('account.analytic.account')
        accounts = []
        for company in company_obj.browse(cr, uid, ids, context=context):
            accounts += analytic_obj.search(cr, uid, [('company_id', '=', company.id)])
        return accounts
    def _set_company_currency(self, cr, uid, ids, name, value, arg, context=None):
        """Inverse of the 'currency_id' function field: a company-bound account
        must keep its company's currency; others may be set freely."""
        if isinstance(ids, (int, long)):
            ids=[ids]
        for account in self.browse(cr, uid, ids, context=context):
            if account.company_id:
                if account.company_id.currency_id.id != value:
                    raise osv.except_osv(_('Error!'), _("If you set a company, the currency selected has to be the same as it's currency. \nYou can remove the company belonging, and thus change the currency, only on analytic account of type 'view'. This can be really useful for consolidation purposes of several companies charts with different currencies, for example."))
            if value:
                cr.execute("""update account_analytic_account set currency_id=%s where id=%s""", (value, account.id))
                self.invalidate_cache(cr, uid, ['currency_id'], [account.id], context=context)
    def _currency(self, cr, uid, ids, field_name, arg, context=None):
        # Effective currency: the company's when set, else the stored one.
        result = {}
        for rec in self.browse(cr, uid, ids, context=context):
            if rec.company_id:
                result[rec.id] = rec.company_id.currency_id.id
            else:
                result[rec.id] = rec.currency_id.id
        return result
    _columns = {
        'name': fields.char('Account/Contract Name', required=True, track_visibility='onchange'),
        'complete_name': fields.function(_get_full_name, type='char', string='Full Name'),
        'code': fields.char('Reference', select=True, track_visibility='onchange', copy=False),
        'type': fields.selection([('view','Analytic View'), ('normal','Analytic Account'),('contract','Contract or Project'),('template','Template of Contract')], 'Type of Account', required=True,
                                 help="If you select the View Type, it means you won\'t allow to create journal entries using that account.\n"\
                                  "The type 'Analytic account' stands for usual accounts that you only want to use in accounting.\n"\
                                  "If you select Contract or Project, it offers you the possibility to manage the validity and the invoicing options for this account.\n"\
                                  "The special type 'Template of Contract' allows you to define a template with default data that you can reuse easily."),
        'template_id': fields.many2one('account.analytic.account', 'Template of Contract'),
        'description': fields.text('Description'),
        'parent_id': fields.many2one('account.analytic.account', 'Parent Analytic Account', select=2),
        'child_ids': fields.one2many('account.analytic.account', 'parent_id', 'Child Accounts'),
        'child_complete_ids': fields.function(_child_compute, relation='account.analytic.account', string="Account Hierarchy", type='many2many'),
        'line_ids': fields.one2many('account.analytic.line', 'account_id', 'Analytic Entries'),
        'balance': fields.function(_debit_credit_bal_qtty, type='float', string='Balance', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
        'debit': fields.function(_debit_credit_bal_qtty, type='float', string='Debit', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
        'credit': fields.function(_debit_credit_bal_qtty, type='float', string='Credit', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
        'quantity': fields.function(_debit_credit_bal_qtty, type='float', string='Quantity', multi='debit_credit_bal_qtty'),
        'quantity_max': fields.float('Prepaid Service Units', help='Sets the higher limit of time to work on the contract, based on the timesheet. (for instance, number of hours in a limited support contract.)'),
        'partner_id': fields.many2one('res.partner', 'Customer'),
        'user_id': fields.many2one('res.users', 'Project Manager', track_visibility='onchange'),
        'manager_id': fields.many2one('res.users', 'Account Manager', track_visibility='onchange'),
        'date_start': fields.date('Start Date'),
        'date': fields.date('Expiration Date', select=True, track_visibility='onchange'),
        'company_id': fields.many2one('res.company', 'Company', required=False), #not required because we want to allow different companies to use the same chart of account, except for leaf accounts.
        'state': fields.selection([('template', 'Template'),
                                   ('draft','New'),
                                   ('open','In Progress'),
                                   ('pending','To Renew'),
                                   ('close','Closed'),
                                   ('cancelled', 'Cancelled')],
                                  'Status', required=True,
                                  track_visibility='onchange', copy=False),
        'currency_id': fields.function(_currency, fnct_inv=_set_company_currency, #the currency_id field is readonly except if it's a view account and if there is no company
            store = {
                'res.company': (_get_analytic_account, ['currency_id'], 10),
            }, string='Currency', type='many2one', relation='res.currency'),
    }
    def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
        """Prefill a contract from its template: shift the validity window so
        it starts now, and copy max quantity, parent and description."""
        if not template_id:
            return {}
        res = {'value':{}}
        template = self.browse(cr, uid, template_id, context=context)
        if template.date_start and template.date:
            from_dt = datetime.strptime(template.date_start, tools.DEFAULT_SERVER_DATE_FORMAT)
            to_dt = datetime.strptime(template.date, tools.DEFAULT_SERVER_DATE_FORMAT)
            timedelta = to_dt - from_dt
            res['value']['date'] = datetime.strftime(datetime.now() + timedelta, tools.DEFAULT_SERVER_DATE_FORMAT)
        if not date_start:
            res['value']['date_start'] = fields.date.today()
        res['value']['quantity_max'] = template.quantity_max
        res['value']['parent_id'] = template.parent_id and template.parent_id.id or False
        res['value']['description'] = template.description
        return res
    def on_change_partner_id(self, cr, uid, ids,partner_id, name, context=None):
        """Default the account manager and the contract name from the customer."""
        res={}
        if partner_id:
            partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
            if partner.user_id:
                res['manager_id'] = partner.user_id.id
            if not name:
                res['name'] = _('Contract: ') + partner.name
        return {'value': res}
    def _default_company(self, cr, uid, context=None):
        # User's company when set, else the topmost company in the system.
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        if user.company_id:
            return user.company_id.id
        return self.pool.get('res.company').search(cr, uid, [('parent_id', '=', False)])[0]
    def _get_default_currency(self, cr, uid, context=None):
        # Default currency: the current user's company currency.
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        return user.company_id.currency_id.id
    _defaults = {
        'type': 'normal',
        'company_id': _default_company,
        'code' : lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'account.analytic.account'),
        'state': 'open',
        'user_id': lambda self, cr, uid, ctx: uid,
        'partner_id': lambda self, cr, uid, ctx: ctx.get('partner_id', False),
        'manager_id': lambda self, cr, uid, ctx: ctx.get('manager_id', False),
        'date_start': lambda *a: time.strftime('%Y-%m-%d'),
        'currency_id': _get_default_currency,
    }
    def check_recursion(self, cr, uid, ids, context=None, parent=None):
        # Public wrapper so the constraint tuple below can reference it.
        return super(account_analytic_account, self)._check_recursion(cr, uid, ids, context=context, parent=parent)
    _order = 'code, name asc'
    _constraints = [
        (check_recursion, 'Error! You cannot create recursive analytic accounts.', ['parent_id']),
    ]
    def name_create(self, cr, uid, name, context=None):
        # Disallow quick creation from many2one widgets.
        raise osv.except_osv(_('Warning'), _("Quick account creation disallowed."))
    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate with a '(copy)'-suffixed name."""
        if not default:
            default = {}
        analytic = self.browse(cr, uid, id, context=context)
        default['name'] = _("%s (copy)") % analytic['name']
        return super(account_analytic_account, self).copy(cr, uid, id, default, context=context)
    def on_change_company(self, cr, uid, id, company_id):
        # Align currency with the selected company's currency.
        if not company_id:
            return {}
        currency = self.pool.get('res.company').read(cr, uid, [company_id], ['currency_id'])[0]['currency_id']
        return {'value': {'currency_id': currency}}
    def on_change_parent(self, cr, uid, id, parent_id):
        # Inherit the partner from the parent account, when it has one.
        if not parent_id:
            return {}
        parent = self.read(cr, uid, [parent_id], ['partner_id','code'])[0]
        if parent['partner_id']:
            partner = parent['partner_id'][0]
        else:
            partner = False
        res = {'value': {}}
        if partner:
            res['value']['partner_id'] = partner
        return res
    def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
        """Search by exact code first; failing that, interpret 'name' as a
        slash-separated path, each segment narrowing the parent domain."""
        if not args:
            args=[]
        if context is None:
            context={}
        if name:
            account_ids = self.search(cr, uid, [('code', '=', name)] + args, limit=limit, context=context)
            if not account_ids:
                dom = []
                for name2 in name.split('/'):
                    name = name2.strip()
                    account_ids = self.search(cr, uid, dom + [('name', operator, name)] + args, limit=limit, context=context)
                    if not account_ids: break
                    dom = [('parent_id','in',account_ids)]
        else:
            account_ids = self.search(cr, uid, args, limit=limit, context=context)
        return self.name_get(cr, uid, account_ids, context=context)
class account_analytic_line(osv.osv):
    # One cost/quantity entry booked on an analytic account.
    _name = 'account.analytic.line'
    _description = 'Analytic Line'
    _columns = {
        'name': fields.char('Description', required=True),
        'date': fields.date('Date', required=True, select=True),
        'amount': fields.float('Amount', required=True, help='Calculated by multiplying the quantity and the price given in the Product\'s cost price. Always expressed in the company main currency.', digits_compute=dp.get_precision('Account')),
        'unit_amount': fields.float('Quantity', help='Specifies the amount of quantity to count.'),
        'account_id': fields.many2one('account.analytic.account', 'Analytic Account', required=True, ondelete='restrict', select=True, domain=[('type','<>','view')]),
        'user_id': fields.many2one('res.users', 'User'),
        'company_id': fields.related('account_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
    }
    def _get_default_date(self, cr, uid, context=None):
        # Overridable hook: today's date in the user's timezone.
        return fields.date.context_today(self, cr, uid, context=context)
    def __get_default_date(self, cr, uid, context=None):
        # Private indirection so subclass overrides of _get_default_date are
        # honoured when the default is evaluated.
        return self._get_default_date(cr, uid, context=context)
    _defaults = {
        'date': __get_default_date,
        'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.analytic.line', context=c),
        'amount': 0.00
    }
    _order = 'date desc'
    def _check_no_view(self, cr, uid, ids, context=None):
        # Constraint helper: posting on 'view' accounts is forbidden.
        analytic_lines = self.browse(cr, uid, ids, context=context)
        for line in analytic_lines:
            if line.account_id.type == 'view':
                return False
        return True
    _constraints = [
        (_check_no_view, 'You cannot create analytic line on view account.', ['account_id']),
    ]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
songmonit/CTTMSONLINE_V8 | refs/heads/master | addons/sale_design/wizard/__init__.py | 2 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import design_make_sale
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
consultit/Ely | refs/heads/master | ely/direct/data_structures_and_algorithms/ch12/merge_queue.py | 1 | # Copyright 2013, Michael H. Goldwasser
#
# Developed for use with the book:
#
# Data Structures and Algorithms in Python
# Michael T. Goodrich, Roberto Tamassia, and Michael H. Goldwasser
# John Wiley & Sons, 2013
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ..ch07.linked_queue import LinkedQueue
def merge(S1, S2, S):
    """Merge two sorted queue instances S1 and S2 into empty queue S.

    At each step the queue whose front element is strictly smaller wins;
    ties go to S2, matching the original comparison direction.
    """
    # interleave while both queues still hold elements
    while not S1.is_empty() and not S2.is_empty():
        source = S1 if S1.first() < S2.first() else S2
        S.enqueue(source.dequeue())
    # at most one of the two queues is non-empty now; drain S1 first, then S2
    for leftover in (S1, S2):
        while not leftover.is_empty():
            S.enqueue(leftover.dequeue())
def merge_sort(S):
    """Sort the elements of queue S in place using recursive merge-sort."""
    count = len(S)
    if count < 2:
        return                       # zero or one element: already sorted
    # divide: move the front half into one queue, the remainder into another
    first_half = LinkedQueue()       # or any other queue implementation
    second_half = LinkedQueue()
    while len(first_half) < count // 2:
        first_half.enqueue(S.dequeue())
    while not S.is_empty():
        second_half.enqueue(S.dequeue())
    # conquer: recursively sort both halves
    merge_sort(first_half)
    merge_sort(second_half)
    # combine: merge the sorted halves back into the (now empty) S
    merge(first_half, second_half, S)
|
Salat-Cx65/python-for-android | refs/heads/master | python3-alpha/python3-src/Tools/pybench/Strings.py | 92 | from pybench import Test
import sys
try:
    intern
except NameError:
    # Python 3 removed the builtin intern(); it now lives in the sys module.
    intern = sys.intern
class ConcatStrings(Test):
    """Benchmark: concatenation of two non-interned ~190-char strings.

    The 50 identical statements are deliberately unrolled so the loop
    overhead does not dominate the measurement; do not fold them into a loop.
    """
    version = 2.0
    operations = 10 * 5
    rounds = 100000
    def test(self):
        # Make sure the strings are *not* interned
        s = ''.join(map(str,range(100)))
        t = ''.join(map(str,range(1,101)))
        for i in range(self.rounds):
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
    def calibrate(self):
        # Same setup and empty loop: measures the overhead to subtract.
        s = ''.join(map(str,range(100)))
        t = ''.join(map(str,range(1,101)))
        for i in range(self.rounds):
            pass
class CompareStrings(Test):
    """Benchmark: ordering/equality comparisons of non-interned strings
    sharing a common prefix. Statements are deliberately unrolled."""
    version = 2.0
    operations = 10 * 5
    rounds = 200000
    def test(self):
        # Make sure the strings are *not* interned
        s = ''.join(map(str,range(10)))
        t = ''.join(map(str,range(10))) + "abc"
        for i in range(self.rounds):
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
    def calibrate(self):
        # Same setup and empty loop: measures the overhead to subtract.
        s = ''.join(map(str,range(10)))
        t = ''.join(map(str,range(10))) + "abc"
        for i in range(self.rounds):
            pass
class CompareInternedStrings(Test):
    """Benchmark: comparisons of interned (identical-object) strings, which
    can hit the identity fast path. Statements are deliberately unrolled."""
    version = 2.0
    operations = 10 * 5
    rounds = 300000
    def test(self):
        # Make sure the strings *are* interned
        s = intern(''.join(map(str,range(10))))
        t = s
        for i in range(self.rounds):
            t == s
            t == s
            t >= s
            t > s
            t < s
            t == s
            t == s
            t >= s
            t > s
            t < s
            t == s
            t == s
            t >= s
            t > s
            t < s
            t == s
            t == s
            t >= s
            t > s
            t < s
            t == s
            t == s
            t >= s
            t > s
            t < s
            t == s
            t == s
            t >= s
            t > s
            t < s
            t == s
            t == s
            t >= s
            t > s
            t < s
            t == s
            t == s
            t >= s
            t > s
            t < s
            t == s
            t == s
            t >= s
            t > s
            t < s
            t == s
            t == s
            t >= s
            t > s
            t < s
    def calibrate(self):
        # Same setup and empty loop: measures the overhead to subtract.
        s = intern(''.join(map(str,range(10))))
        t = s
        for i in range(self.rounds):
            pass
class CreateStringsWithConcat(Test):
    """Benchmark: building a string via repeated '+' reassignment (the
    classic quadratic pattern). Statements are deliberately unrolled."""
    version = 2.0
    operations = 10 * 5
    rounds = 200000
    def test(self):
        for i in range(self.rounds):
            s = 'om'
            s = s + 'xbx'
            s = s + 'xcx'
            s = s + 'xdx'
            s = s + 'xex'
            s = s + 'xax'
            s = s + 'xbx'
            s = s + 'xcx'
            s = s + 'xdx'
            s = s + 'xex'
            s = s + 'xax'
            s = s + 'xbx'
            s = s + 'xcx'
            s = s + 'xdx'
            s = s + 'xex'
            s = s + 'xax'
            s = s + 'xbx'
            s = s + 'xcx'
            s = s + 'xdx'
            s = s + 'xex'
            s = s + 'xax'
            s = s + 'xbx'
            s = s + 'xcx'
            s = s + 'xdx'
            s = s + 'xex'
            s = s + 'xax'
            s = s + 'xbx'
            s = s + 'xcx'
            s = s + 'xdx'
            s = s + 'xex'
            s = s + 'xax'
            s = s + 'xbx'
            s = s + 'xcx'
            s = s + 'xdx'
            s = s + 'xex'
            s = s + 'xax'
            s = s + 'xbx'
            s = s + 'xcx'
            s = s + 'xdx'
            s = s + 'xex'
            s = s + 'xax'
            s = s + 'xbx'
            s = s + 'xcx'
            s = s + 'xdx'
            s = s + 'xex'
            s = s + 'xax'
            s = s + 'xbx'
            s = s + 'xcx'
            s = s + 'xdx'
            s = s + 'xex'
    def calibrate(self):
        # Empty loop: measures the overhead to subtract.
        for i in range(self.rounds):
            pass
class StringSlicing(Test):
    """Benchmark: slicing a ~190-char string with a mix of open-ended,
    short, negative-index and two-sided slices. Deliberately unrolled."""
    version = 2.0
    operations = 5 * 7
    rounds = 160000
    def test(self):
        s = ''.join(map(str,range(100)))
        for i in range(self.rounds):
            s[50:]
            s[:25]
            s[50:55]
            s[-1:]
            s[:1]
            s[2:]
            s[11:-11]
            s[50:]
            s[:25]
            s[50:55]
            s[-1:]
            s[:1]
            s[2:]
            s[11:-11]
            s[50:]
            s[:25]
            s[50:55]
            s[-1:]
            s[:1]
            s[2:]
            s[11:-11]
            s[50:]
            s[:25]
            s[50:55]
            s[-1:]
            s[:1]
            s[2:]
            s[11:-11]
            s[50:]
            s[:25]
            s[50:55]
            s[-1:]
            s[:1]
            s[2:]
            s[11:-11]
    def calibrate(self):
        # Same setup and empty loop: measures the overhead to subtract.
        s = ''.join(map(str,range(100)))
        for i in range(self.rounds):
            pass
### String methods
if hasattr('', 'lower'):
    # Only defined when the string type has the case-mapping methods.
    class StringMappings(Test):
        # Benchmark: lower()/upper()/title() on strings of increasing
        # length (20, 50, 100 and 256 chars); the shorter the string,
        # the more repetitions per round (see ``operations``).
        version = 2.0
        operations = 3 * (5 + 4 + 2 + 1)
        rounds = 70000

        def test(self):
            s = ''.join(map(chr,range(20)))
            t = ''.join(map(chr,range(50)))
            u = ''.join(map(chr,range(100)))
            v = ''.join(map(chr,range(256)))
            for i in range(self.rounds):
                s.lower()
                s.lower()
                s.lower()
                s.lower()
                s.lower()
                s.upper()
                s.upper()
                s.upper()
                s.upper()
                s.upper()
                s.title()
                s.title()
                s.title()
                s.title()
                s.title()
                t.lower()
                t.lower()
                t.lower()
                t.lower()
                t.upper()
                t.upper()
                t.upper()
                t.upper()
                t.title()
                t.title()
                t.title()
                t.title()
                u.lower()
                u.lower()
                u.upper()
                u.upper()
                u.title()
                u.title()
                v.lower()
                v.upper()
                v.title()

        def calibrate(self):
            # Identical setup with an empty loop body (overhead baseline).
            s = ''.join(map(chr,range(20)))
            t = ''.join(map(chr,range(50)))
            u = ''.join(map(chr,range(100)))
            v = ''.join(map(chr,range(256)))
            for i in range(self.rounds):
                pass
class StringPredicates(Test):
    # Benchmark: the seven str.is*() predicates, applied in 10 unrolled
    # groups per round while cycling over a small set of sample strings
    # (ASCII letters, digits, whitespace, latin-1 text).
    version = 2.0
    operations = 10 * 7
    rounds = 100000

    def test(self):
        data = ('abc', '123', ' ', '\xe4\xf6\xfc', '\xdf'*10)
        len_data = len(data)
        for i in range(self.rounds):
            s = data[i % len_data]
            s.isalnum()
            s.isalpha()
            s.isdigit()
            s.islower()
            s.isspace()
            s.istitle()
            s.isupper()
            s.isalnum()
            s.isalpha()
            s.isdigit()
            s.islower()
            s.isspace()
            s.istitle()
            s.isupper()
            s.isalnum()
            s.isalpha()
            s.isdigit()
            s.islower()
            s.isspace()
            s.istitle()
            s.isupper()
            s.isalnum()
            s.isalpha()
            s.isdigit()
            s.islower()
            s.isspace()
            s.istitle()
            s.isupper()
            s.isalnum()
            s.isalpha()
            s.isdigit()
            s.islower()
            s.isspace()
            s.istitle()
            s.isupper()
            s.isalnum()
            s.isalpha()
            s.isdigit()
            s.islower()
            s.isspace()
            s.istitle()
            s.isupper()
            s.isalnum()
            s.isalpha()
            s.isdigit()
            s.islower()
            s.isspace()
            s.istitle()
            s.isupper()
            s.isalnum()
            s.isalpha()
            s.isdigit()
            s.islower()
            s.isspace()
            s.istitle()
            s.isupper()
            s.isalnum()
            s.isalpha()
            s.isdigit()
            s.islower()
            s.isspace()
            s.istitle()
            s.isupper()
            s.isalnum()
            s.isalpha()
            s.isdigit()
            s.islower()
            s.isspace()
            s.istitle()
            s.isupper()

    def calibrate(self):
        # Mirrors test() with an empty work body so the harness can
        # subtract per-iteration overhead. A stale duplicate ``data``
        # assignment (a unicode-escape variant that was immediately
        # overwritten) was removed so calibrate() matches test() exactly
        # and does not skew the overhead baseline.
        data = ('abc', '123', ' ', '\xe4\xf6\xfc', '\xdf'*10)
        len_data = len(data)
        for i in range(self.rounds):
            s = data[i % len_data]
|
cvegaj/ElectriCERT | refs/heads/master | venv3/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/version.py | 1151 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import collections
import itertools
import re
from ._structures import Infinity
__all__ = [
"parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"
]
_Version = collections.namedtuple(
"_Version",
["epoch", "release", "dev", "pre", "post", "local"],
)
def parse(version):
    """
    Parse the given version string and return either a :class:`Version` object
    or a :class:`LegacyVersion` object depending on if the given version is
    a valid PEP 440 version or a legacy version.
    """
    try:
        return Version(version)
    except InvalidVersion:
        # Not PEP 440 -- fall back to the permissive setuptools-era scheme.
        return LegacyVersion(version)


class InvalidVersion(ValueError):
    """
    An invalid version was found, users should refer to PEP 440.
    """
    # Raised by Version() when the string does not match VERSION_PATTERN.
class _BaseVersion(object):
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, _BaseVersion):
return NotImplemented
return method(self._key, other._key)
class LegacyVersion(_BaseVersion):
    # A non-PEP 440 version string, ordered via the pre-PEP 440
    # setuptools comparison scheme (see _legacy_cmpkey).

    def __init__(self, version):
        self._version = str(version)
        self._key = _legacy_cmpkey(self._version)

    def __str__(self):
        return self._version

    def __repr__(self):
        return "<LegacyVersion({0})>".format(repr(str(self)))

    @property
    def public(self):
        # Legacy versions have no local segment, so the whole string is public.
        return self._version

    @property
    def base_version(self):
        return self._version

    @property
    def local(self):
        # Local segments are a PEP 440 concept; legacy versions have none.
        return None

    @property
    def is_prerelease(self):
        # Pre/post release status cannot be reliably determined for
        # legacy versions, so both properties report False.
        return False

    @property
    def is_postrelease(self):
        return False
_legacy_version_component_re = re.compile(
r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
)
_legacy_version_replacement_map = {
"pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
}
def _parse_version_parts(s):
for part in _legacy_version_component_re.split(s):
part = _legacy_version_replacement_map.get(part, part)
if not part or part == ".":
continue
if part[:1] in "0123456789":
# pad for numeric comparison
yield part.zfill(8)
else:
yield "*" + part
# ensure that alpha/beta/candidate are before final
yield "*final"
def _legacy_cmpkey(version):
    # Build the sort key (epoch, token tuple) for a legacy version string.

    # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch
    # greater than or equal to 0. This will effectively put the LegacyVersion,
    # which uses the defacto standard originally implemented by setuptools,
    # as before all PEP 440 versions.
    epoch = -1

    # This scheme is taken from pkg_resources.parse_version setuptools prior to
    # it's adoption of the packaging library.
    parts = []
    for part in _parse_version_parts(version.lower()):
        if part.startswith("*"):
            # remove "-" before a prerelease tag
            if part < "*final":
                while parts and parts[-1] == "*final-":
                    parts.pop()

            # remove trailing zeros from each series of numeric parts
            while parts and parts[-1] == "00000000":
                parts.pop()

        parts.append(part)
    parts = tuple(parts)

    return epoch, parts
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
class Version(_BaseVersion):
    # A PEP 440-compliant version, parsed against VERSION_PATTERN and
    # sorted via the _cmpkey tuple.

    _regex = re.compile(
        r"^\s*" + VERSION_PATTERN + r"\s*$",
        re.VERBOSE | re.IGNORECASE,
    )

    def __init__(self, version):
        """Parse ``version``; raises :class:`InvalidVersion` if not PEP 440."""
        # Validate the version and parse it into pieces
        match = self._regex.search(version)
        if not match:
            raise InvalidVersion("Invalid version: '{0}'".format(version))

        # Store the parsed out pieces of the version
        self._version = _Version(
            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
            release=tuple(int(i) for i in match.group("release").split(".")),
            pre=_parse_letter_version(
                match.group("pre_l"),
                match.group("pre_n"),
            ),
            post=_parse_letter_version(
                match.group("post_l"),
                # post releases have two syntaxes ("1.0-1" vs "1.0.post1"),
                # captured by two different groups.
                match.group("post_n1") or match.group("post_n2"),
            ),
            dev=_parse_letter_version(
                match.group("dev_l"),
                match.group("dev_n"),
            ),
            local=_parse_local_version(match.group("local")),
        )

        # Generate a key which will be used for sorting
        self._key = _cmpkey(
            self._version.epoch,
            self._version.release,
            self._version.pre,
            self._version.post,
            self._version.dev,
            self._version.local,
        )

    def __repr__(self):
        return "<Version({0})>".format(repr(str(self)))

    def __str__(self):
        # Reassemble the canonical string form from the parsed pieces.
        parts = []

        # Epoch
        if self._version.epoch != 0:
            parts.append("{0}!".format(self._version.epoch))

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        # Pre-release
        if self._version.pre is not None:
            parts.append("".join(str(x) for x in self._version.pre))

        # Post-release
        if self._version.post is not None:
            parts.append(".post{0}".format(self._version.post[1]))

        # Development release
        if self._version.dev is not None:
            parts.append(".dev{0}".format(self._version.dev[1]))

        # Local version segment
        if self._version.local is not None:
            parts.append(
                "+{0}".format(".".join(str(x) for x in self._version.local))
            )

        return "".join(parts)

    @property
    def public(self):
        # Everything up to, but excluding, the "+local" segment.
        return str(self).split("+", 1)[0]

    @property
    def base_version(self):
        # Epoch and release segment only -- no pre/post/dev/local parts.
        parts = []

        # Epoch
        if self._version.epoch != 0:
            parts.append("{0}!".format(self._version.epoch))

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        return "".join(parts)

    @property
    def local(self):
        # The local segment as a string, or None when there is none.
        version_string = str(self)
        if "+" in version_string:
            return version_string.split("+", 1)[1]

    @property
    def is_prerelease(self):
        # Dev releases count as pre-releases for filtering purposes.
        return bool(self._version.dev or self._version.pre)

    @property
    def is_postrelease(self):
        return bool(self._version.post)
def _parse_letter_version(letter, number):
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume if we are given a number, but we are not given a letter
# then this is using the implicit post release syntax (e.g. 1.0-1)
letter = "post"
return letter, int(number)
_local_version_seperators = re.compile(r"[\._-]")
def _parse_local_version(local):
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_seperators.split(local)
)
def _cmpkey(epoch, release, pre, post, dev, local):
    # Build the total-ordering sort key for a PEP 440 version. Absent
    # segments are replaced by +/-Infinity sentinels so that tuple
    # comparison implements the PEP 440 ordering rules.

    # When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll use a reverse the list, drop all the now
    # leading zeros until we come to something non zero, then take the rest
    # re-reverse it back into the correct order and make it a tuple and use
    # that for our sorting key.
    release = tuple(
        reversed(list(
            itertools.dropwhile(
                lambda x: x == 0,
                reversed(release),
            )
        ))
    )

    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
    # We'll do this by abusing the pre segment, but we _only_ want to do this
    # if there is not a pre or a post segment. If we have one of those then
    # the normal sorting rules will handle this case correctly.
    if pre is None and post is None and dev is not None:
        pre = -Infinity
    # Versions without a pre-release (except as noted above) should sort after
    # those with one.
    elif pre is None:
        pre = Infinity

    # Versions without a post segment should sort before those with one.
    if post is None:
        post = -Infinity

    # Versions without a development segment should sort after those with one.
    if dev is None:
        dev = Infinity

    if local is None:
        # Versions without a local segment should sort before those with one.
        local = -Infinity
    else:
        # Versions with a local segment need that segment parsed to implement
        # the sorting rules in PEP440.
        # - Alpha numeric segments sort before numeric segments
        # - Alpha numeric segments sort lexicographically
        # - Numeric segments sort numerically
        # - Shorter versions sort before longer versions when the prefixes
        #   match exactly
        local = tuple(
            (i, "") if isinstance(i, int) else (-Infinity, i)
            for i in local
        )

    return epoch, release, pre, post, dev, local
|
linjeffrey/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/port/mac.py | 113 | # Copyright (C) 2011 Google Inc. All rights reserved.
# Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import time
from webkitpy.common.system.crashlogs import CrashLogs
from webkitpy.common.system.executive import ScriptError
from webkitpy.port.apple import ApplePort
from webkitpy.port.leakdetector import LeakDetector
_log = logging.getLogger(__name__)
class MacPort(ApplePort):
    """WebKit layout-test port for Mac OS X.

    Handles Mac-specific test configuration: baseline fallback order,
    leak detection, crash-log and sample collection, and the
    LayoutTestHelper lifecycle. (Python 2 code -- note the old-style
    ``except IOError, e`` and ``file()`` usages below.)
    """
    port_name = "mac"

    # Oldest to newest; used to build the baseline fallback chain.
    VERSION_FALLBACK_ORDER = ['mac-snowleopard', 'mac-lion', 'mac-mountainlion']

    ARCHITECTURES = ['x86_64', 'x86']

    def __init__(self, host, port_name, **kwargs):
        ApplePort.__init__(self, host, port_name, **kwargs)
        self._architecture = self.get_option('architecture')

        if not self._architecture:
            self._architecture = 'x86_64'

        self._leak_detector = LeakDetector(self)
        if self.get_option("leaks"):
            # DumpRenderTree slows down noticably if we run more than about 1000 tests in a batch
            # with MallocStackLogging enabled.
            self.set_option_default("batch_size", 1000)

    def default_timeout_ms(self):
        # Guard Malloc makes everything much slower, so allow 350s per test.
        if self.get_option('guard_malloc'):
            return 350 * 1000
        return super(MacPort, self).default_timeout_ms()

    def supports_per_test_timeout(self):
        return True

    def _build_driver_flags(self):
        # 32-bit builds need an explicit ARCHS override.
        return ['ARCHS=i386'] if self.architecture() == 'x86' else []

    def should_retry_crashes(self):
        # On Apple Mac, we retry crashes due to https://bugs.webkit.org/show_bug.cgi?id=82233
        return True

    def default_baseline_search_path(self):
        # Build the fallback chain from this OS version up to the generic
        # 'mac' baselines, optionally prefixed with the WebKit2 variants.
        name = self._name.replace('-wk2', '')
        if name.endswith(self.FUTURE_VERSION):
            fallback_names = [self.port_name]
        else:
            fallback_names = self.VERSION_FALLBACK_ORDER[self.VERSION_FALLBACK_ORDER.index(name):-1] + [self.port_name]
        if self.get_option('webkit_test_runner'):
            fallback_names = [self._wk2_port_name(), 'wk2'] + fallback_names
        return map(self._webkit_baseline_path, fallback_names)

    def _port_specific_expectations_files(self):
        # Most-generic expectations first (reverse of the search path).
        return list(reversed([self._filesystem.join(self._webkit_baseline_path(p), 'TestExpectations') for p in self.baseline_search_path()]))

    def setup_environ_for_server(self, server_name=None):
        # Inject leak-checking / Guard Malloc environment into the driver only.
        env = super(MacPort, self).setup_environ_for_server(server_name)
        if server_name == self.driver_name():
            if self.get_option('leaks'):
                env['MallocStackLogging'] = '1'
            if self.get_option('guard_malloc'):
                env['DYLD_INSERT_LIBRARIES'] = '/usr/lib/libgmalloc.dylib:' + self._build_path("libWebCoreTestShim.dylib")
            else:
                env['DYLD_INSERT_LIBRARIES'] = self._build_path("libWebCoreTestShim.dylib")
        env['XML_CATALOG_FILES'] = ''  # work around missing /etc/catalog <rdar://problem/4292995>
        return env

    def operating_system(self):
        return 'mac'

    # Belongs on a Platform object.
    def is_snowleopard(self):
        return self._version == "snowleopard"

    # Belongs on a Platform object.
    def is_lion(self):
        return self._version == "lion"

    def default_child_processes(self):
        # Pick a worker count bounded by OS quirks, WK2 resource
        # contention, and available physical memory.
        if self._version == "snowleopard":
            _log.warning("Cannot run tests in parallel on Snow Leopard due to rdar://problem/10621525.")
            return 1

        default_count = super(MacPort, self).default_child_processes()

        # FIXME: https://bugs.webkit.org/show_bug.cgi?id=95906  With too many WebProcess WK2 tests get stuck in resource contention.
        # To alleviate the issue reduce the number of running processes
        # Anecdotal evidence suggests that a 4 core/8 core logical machine may run into this, but that a 2 core/4 core logical machine does not.
        should_throttle_for_wk2 = self.get_option('webkit_test_runner') and default_count > 4
        # We also want to throttle for leaks bots.
        if should_throttle_for_wk2 or self.get_option('leaks'):
            default_count = int(.75 * default_count)

        # Make sure we have enough ram to support that many instances:
        total_memory = self.host.platform.total_bytes_memory()
        if total_memory:
            bytes_per_drt = 256 * 1024 * 1024  # Assume each DRT needs 256MB to run.
            overhead = 2048 * 1024 * 1024  # Assume we need 2GB free for the O/S
            supportable_instances = max((total_memory - overhead) / bytes_per_drt, 1)  # Always use one process, even if we don't have space for it.
            if supportable_instances < default_count:
                _log.warning("This machine could support %s child processes, but only has enough memory for %s." % (default_count, supportable_instances))
        else:
            _log.warning("Cannot determine available memory for child processes, using default child process count of %s." % default_count)
            supportable_instances = default_count
        return min(supportable_instances, default_count)

    def _build_java_test_support(self):
        # Build the Java applet test fixtures; returns False on failure.
        java_tests_path = self._filesystem.join(self.layout_tests_dir(), "java")
        build_java = [self.make_command(), "-C", java_tests_path]
        if self._executive.run_command(build_java, return_exit_code=True):  # Paths are absolute, so we don't need to set a cwd.
            _log.error("Failed to build Java support files: %s" % build_java)
            return False
        return True

    def check_for_leaks(self, process_name, process_pid):
        if not self.get_option('leaks'):
            return
        # We could use http://code.google.com/p/psutil/ to get the process_name from the pid.
        self._leak_detector.check_for_leaks(process_name, process_pid)

    def print_leaks_summary(self):
        if not self.get_option('leaks'):
            return
        # We're in the manager process, so the leak detector will not have a valid list of leak files.
        # FIXME: This is a hack, but we don't have a better way to get this information from the workers yet.
        # FIXME: This will include too many leaks in subsequent runs until the results directory is cleared!
        leaks_files = self._leak_detector.leaks_files_in_directory(self.results_directory())
        if not leaks_files:
            return
        total_bytes_string, unique_leaks = self._leak_detector.count_total_bytes_and_unique_leaks(leaks_files)
        total_leaks = self._leak_detector.count_total_leaks(leaks_files)
        _log.info("%s total leaks found for a total of %s!" % (total_leaks, total_bytes_string))
        _log.info("%s unique leaks found!" % unique_leaks)

    def _check_port_build(self):
        return self.get_option('nojava') or self._build_java_test_support()

    def _path_to_webcore_library(self):
        return self._build_path('WebCore.framework/Versions/A/WebCore')

    def show_results_html_file(self, results_filename):
        # We don't use self._run_script() because we don't want to wait for the script
        # to exit and we want the output to show up on stdout in case there are errors
        # launching the browser.
        self._executive.popen([self.path_to_script('run-safari')] + self._arguments_for_configuration() + ['--no-saved-state', '-NSOpen', results_filename],
            cwd=self.webkit_base(), stdout=file(os.devnull), stderr=file(os.devnull))

    # FIXME: The next two routines turn off the http locking in order
    # to work around failures on the bots caused when the slave restarts.
    # See https://bugs.webkit.org/show_bug.cgi?id=64886 for more info.
    # The proper fix is to make sure the slave is actually stopping NRWT
    # properly on restart. Note that by removing the lock file and not waiting,
    # the result should be that if there is a web server already running,
    # it'll be killed and this one will be started in its place; this
    # may lead to weird things happening in the other run. However, I don't
    # think we're (intentionally) actually running multiple runs concurrently
    # on any Mac bots.

    def acquire_http_lock(self):
        pass

    def release_http_lock(self):
        pass

    def sample_file_path(self, name, pid):
        return self._filesystem.join(self.results_directory(), "{0}-{1}-sample.txt".format(name, pid))

    def _get_crash_log(self, name, pid, stdout, stderr, newer_than, time_fn=None, sleep_fn=None, wait_for_log=True):
        # Returns (stderr, crash_log_text_or_None).
        # Note that we do slow-spin here and wait, since it appears the time
        # ReportCrash takes to actually write and flush the file varies when there are
        # lots of simultaneous crashes going on.
        # FIXME: Should most of this be moved into CrashLogs()?
        time_fn = time_fn or time.time
        sleep_fn = sleep_fn or time.sleep
        crash_log = ''
        crash_logs = CrashLogs(self.host)
        now = time_fn()
        # FIXME: delete this after we're sure this code is working ...
        _log.debug('looking for crash log for %s:%s' % (name, str(pid)))
        deadline = now + 5 * int(self.get_option('child_processes', 1))
        while not crash_log and now <= deadline:
            crash_log = crash_logs.find_newest_log(name, pid, include_errors=True, newer_than=newer_than)
            if not wait_for_log:
                break
            if not crash_log or not [line for line in crash_log.splitlines() if not line.startswith('ERROR')]:
                sleep_fn(0.1)
                now = time_fn()

        if not crash_log:
            return (stderr, None)
        return (stderr, crash_log)

    def look_for_new_crash_logs(self, crashed_processes, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress do a second pass at the end of the test run.

           crashes: test_name -> pid, process_name tuple of crashed process
           start_time: time the tests started at.  We're looking for crash
               logs after that time.
        """
        crash_logs = {}
        for (test_name, process_name, pid) in crashed_processes:
            # Passing None for output.  This is a second pass after the test finished so
            # if the output had any logging we would have already collected it.
            crash_log = self._get_crash_log(process_name, pid, None, None, start_time, wait_for_log=False)[1]
            if not crash_log:
                continue
            crash_logs[test_name] = crash_log
        return crash_logs

    def look_for_new_samples(self, unresponsive_processes, start_time):
        # Map test_name -> sample file written by sample_process(), if any.
        sample_files = {}
        for (test_name, process_name, pid) in unresponsive_processes:
            sample_file = self.sample_file_path(process_name, pid)
            if not self._filesystem.isfile(sample_file):
                continue
            sample_files[test_name] = sample_file
        return sample_files

    def sample_process(self, name, pid):
        # Capture a 10-second /usr/bin/sample profile of a hung process.
        try:
            hang_report = self.sample_file_path(name, pid)
            self._executive.run_command([
                "/usr/bin/sample",
                pid,
                10,
                10,
                "-file",
                hang_report,
            ])
        except ScriptError as e:
            _log.warning('Unable to sample process:' + str(e))

    def _path_to_helper(self):
        binary_name = 'LayoutTestHelper'
        return self._build_path(binary_name)

    def start_helper(self):
        # Launch LayoutTestHelper and wait for its "ready" handshake.
        helper_path = self._path_to_helper()
        if helper_path:
            _log.debug("Starting layout helper %s" % helper_path)
            self._helper = self._executive.popen([helper_path],
                stdin=self._executive.PIPE, stdout=self._executive.PIPE, stderr=None)
            is_ready = self._helper.stdout.readline()
            if not is_ready.startswith('ready'):
                _log.error("LayoutTestHelper failed to be ready")

    def stop_helper(self):
        # Ask the helper to exit ("x") and reap it; tolerate pipe errors.
        if self._helper:
            _log.debug("Stopping LayoutTestHelper")
            try:
                self._helper.stdin.write("x\n")
                self._helper.stdin.close()
                self._helper.wait()
            except IOError, e:
                _log.debug("IOError raised while stopping helper: %s" % str(e))
            self._helper = None

    def make_command(self):
        return self.xcrun_find('make', '/usr/bin/make')

    def nm_command(self):
        return self.xcrun_find('nm', 'nm')

    def xcrun_find(self, command, fallback):
        # Resolve a tool through xcrun, falling back to a fixed path/name.
        try:
            return self._executive.run_command(['xcrun', '-find', command]).rstrip()
        except ScriptError:
            _log.warn("xcrun failed; falling back to '%s'." % fallback)
            return fallback
|
jaduse/pocket-on-term | refs/heads/master | pot/reader.py | 1 | # -*- coding: utf-8 -*-
import urllib.request
import urllib.parse
import urllib.error
import json
import sys
import os
import urwid
from subprocess import Popen
from subprocess import PIPE
from subprocess import SubprocessError
from bs4 import BeautifulSoup
from .pocketapi import PocketUtils
class Reader:
    # Full-screen urwid UI for one Pocket article: title header, the
    # article body rendered to text via an external elinks process, and
    # a progress bar tracking the scroll position.

    PALETTE = [
        ("default", "dark gray", "white", "standout"),
        ("dark", "black", "white", "standout"),
        ("pg normal", "white", "dark gray", "standout"),
        ("pg complete", "black", "dark magenta"),
        ("popup", "black", "dark green"),
        ("header", "white", "white", "bold")
    ]
    # Terminal dimensions are captured once, at class-definition time.
    term_w, term_h = os.get_terminal_size()

    def __init__(self, article, config=None):
        self.article = article
        self.config = config if config else {
            "theme": "default"
        }
        # Truncate the title so it fits on a single header line.
        self.title = article.resolved_title if (
            len(article.resolved_title) <= self.term_w
        ) else article.resolved_title[:self.term_w - 1]
        self.buffer, self.ref = self.parse()
        self.set_up()

    def set_up(self):
        # Build the widget tree and start the urwid main loop
        # (blocks until the user quits the reader).
        self.offset = 0
        self.size = (self.term_w, self.term_h)
        self.content = Content(self.buffer, self.ref, self.size)
        self.status = urwid.ProgressBar(
            "pg normal",
            "pg complete",
            done=len(self.buffer)-self.term_h
        )
        self.scr = urwid.Frame(
            self.content,
            urwid.Text(("header", self.title)),
            self.status)
        self.loop = urwid.MainLoop(
            self.scr,
            self.PALETTE,
            pop_ups=True,
            handle_mouse=False
        )
        urwid.connect_signal(self.content, "update_pg", self.update_pg)
        self.loop.screen.set_terminal_properties(colors=16)
        self.loop.run()

    def parse(self):
        # Render the article HTML to plain text through elinks and split
        # the dump into (article lines, link-reference lines).
        content = self.article.text
        elinks_params = [
            "elinks",
            "-dump", "1",
            # "-dump-color-mode", "1",
            "-dump-width", str(self.term_w)
        ]
        elinks_proc = Popen(
            elinks_params,
            stdin=PIPE,
            stdout=PIPE,
            stderr=PIPE,
            universal_newlines=True
        )
        out, err = elinks_proc.communicate(content)
        if err:
            raise SubprocessError()
        article, links = self.split_at_last(
            lambda x: x.startswith(" Visible links"),
            out.splitlines(True)
        )
        return (article, links)

    def update_pg(self):
        # Signal handler: mirror the content scroll offset in the bar.
        self.status.set_completion(self.content.offset)

    def split_at_last(self, cond, seq):
        # Split ``seq`` around the last element satisfying ``cond``;
        # returns (before, after) or None when no element matches.
        def split_list(index, seq):
            return (seq[:index - 1], seq[index:])
        for i, l in enumerate(reversed(seq)):
            if cond(l):
                return split_list(len(seq) - i, seq)
class Content(urwid.PopUpLauncher, urwid.WidgetWrap):
    """Scrollable article-body widget with a pop-up link list.

    Emits "update_pg" whenever the scroll offset changes so the outer
    progress bar can track the reading position.
    """

    __metaclass__ = urwid.MetaSignals
    signals = [
        "keypress",
        "update_pg"
    ]

    def __init__(self, buffer, links, size):
        # buffer: rendered article lines; links: lines for the popup;
        # size: (columns, rows) of the terminal.
        self.buffer = buffer
        self.text = urwid.Text("")
        self.popup = Dialog(links)
        self.fill = urwid.Filler(self.text)
        self.offset = 0
        self.term_w, self.term_h = size
        super(Content, self).__init__(self.fill)
        urwid.connect_signal(self, "keypress", self.keypress)
        urwid.connect_signal(self.popup, "close", self.close_pop_up)
        self.redraw()

    def redraw(self):
        # Display one screenful starting at the current offset and
        # notify listeners that the position changed.
        cur_e = self.offset + self.term_h - 2
        lines = self.buffer[self.offset:cur_e]
        self.text.set_text(lines)
        urwid.emit_signal(self, "update_pg")

    def keypress(self, size, key):
        def up():
            if self.offset > 0:
                self.offset -= 1

        def down():
            if (self.offset + self.term_h < len(self.buffer)):
                self.offset += 1

        def home():
            self.offset = 0

        def end():
            self.offset = len(self.buffer) - self.term_h - 1

        def page_down():
            if self.offset + 2 * self.term_h < len(self.buffer):
                self.offset += self.term_h

        def page_up():
            if self.offset - self.term_h > 0:
                self.offset -= self.term_h

        def r():
            self.open_pop_up()

        def q():
            raise urwid.ExitMainLoop()

        # Explicit dispatch table instead of the previous
        # ``locals().get(key.replace(" ", "_"))()`` trick, which could
        # attempt to call a non-handler local (e.g. for key names
        # "size", "key" or "self") and silently depended on this
        # function's local namespace.
        handlers = {
            "up": up,
            "down": down,
            "home": home,
            "end": end,
            "page down": page_down,
            "page up": page_up,
            "r": r,
            "q": q,
        }
        handler = handlers.get(key)
        if handler is None:
            return False
        handler()
        self.redraw()

    def create_pop_up(self):
        return self.popup

    def get_pop_up_parameters(self):
        # Popup occupies the full width and the top third of the screen.
        return {
            'left': 0,
            'top': 2,
            'overlay_width': self.term_w,
            'overlay_height': self.term_h//3
        }

    def selectable(self):
        # Must be selectable so the widget receives key presses.
        return True
class Dialog(urwid.WidgetWrap):
    # Pop-up listing the article's links; 'q' closes it, other keys
    # scroll the list.

    signals = ["close"]

    def __init__(self, links):
        self.body = [urwid.Text(l) for l in links]
        self.listbox = urwid.ListBox(
            urwid.SimpleFocusListWalker(self.body))
        self.win = urwid.LineBox(
            self.listbox,
            title="Links"
        )
        self.attr_win = urwid.AttrMap(self.win, "popup")
        super(Dialog, self).__init__(self.attr_win)

    def keypress(self, size, key):
        # Emit "close" on 'q'; always forward the key to the list so
        # navigation keys keep working.
        if key == "q":
            urwid.emit_signal(self, "close")
        self.listbox.keypress(size, key)
class Readability:
    """Deprecated: superseded by fetching parsed articles straight from
    the Pocket API (see PocketUtils); kept for reference only."""

    token = "4bce87cb2c7fa40f102378923718dbbc1c864317"
    api_url = "https://readability.com/api/content/v1/parser"

    def __init__(self, url):
        self.url = url
        self.article = self.retrieve()

    def retrieve(self):
        """Fetch and JSON-decode the parsed article.

        Returns the decoded dict, or False after logging an HTTP error
        to stderr.
        """
        params = urllib.parse.urlencode(
            {
                "url": self.url,
                "token": self.token
            }
        )
        request = urllib.request.Request(
            '?'.join((self.api_url, params))
        )
        request_opener = urllib.request.build_opener()
        try:
            response = request_opener.open(request)
        except urllib.error.HTTPError as err:
            sys.stderr.write("{0}\n".format(err))
            return False
        # Bug fix: http.client.HTTPResponse has no readall() method --
        # read() consumes the whole body.
        article = json.loads(
            response.read().decode("utf-8")
        )
        return article

    def get(self, *args):
        """With no arguments return all field names; with one argument
        return that field's value."""
        if not args:
            return self.article.keys()
        return self.article[args[0]]
class Article:
    # Thin wrapper around one Pocket item: the raw item dict is copied
    # onto the instance, then the "0"/"1" string flags are coerced to
    # real booleans.

    def __init__(self, id, item):
        self.id = id
        self.__dict__.update(item)
        self.has_image = self.has_image == "1"
        self.has_video = self.has_video == "1"
        self.favorite = self.favorite == "1"

    @property
    def text(self):
        """Fetch the parsed article HTML via the Pocket parser and
        return it prettified, with RIL image/video placeholder divs
        replaced by links to the original media. Returns None when the
        parser does not answer with responseCode "200"."""
        def replace_RIL(divs, pholder, links):
            # Each placeholder div's id ends with the media item id;
            # swap the div for an <a> pointing at the media source.
            for div in divs:
                itm_id = div["id"].rsplit("_", 1)[-1]
                a_tag = bs.new_tag("a", href=links[itm_id]["src"])
                a_tag.insert(0, "[{}]".format(pholder))
                div.replace_with(a_tag)

        response = PocketUtils.parser(self.resolved_url)
        if response["responseCode"] != "200":
            return
        bs = BeautifulSoup(response["article"], "html.parser")
        if self.has_image:
            replace_RIL(
                bs.findAll("div", class_="RIL_IMG"),
                "IMG", self.images)
        if self.has_video:
            replace_RIL(bs.findAll(
                "div", class_="RIL_VIDEO"),
                "VIDEO", self.videos)
        return bs.prettify()
|
WillisXChen/django-oscar | refs/heads/master | oscar/lib/python2.7/site-packages/django/core/cache/backends/filebased.py | 428 | "File-based cache backend"
import errno
import glob
import hashlib
import io
import os
import random
import tempfile
import time
import zlib
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.core.files.move import file_move_safe
from django.utils.encoding import force_bytes
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
class FileBasedCache(BaseCache):
cache_suffix = '.djcache'
def __init__(self, dir, params):
super(FileBasedCache, self).__init__(params)
self._dir = os.path.abspath(dir)
self._createdir()
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
if self.has_key(key, version):
return False
self.set(key, value, timeout, version)
return True
def get(self, key, default=None, version=None):
fname = self._key_to_file(key, version)
if os.path.exists(fname):
try:
with io.open(fname, 'rb') as f:
if not self._is_expired(f):
return pickle.loads(zlib.decompress(f.read()))
except IOError as e:
if e.errno == errno.ENOENT:
pass # Cache file was removed after the exists check
return default
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
self._createdir() # Cache dir can be deleted at any time.
fname = self._key_to_file(key, version)
self._cull() # make some room if necessary
fd, tmp_path = tempfile.mkstemp(dir=self._dir)
renamed = False
try:
with io.open(fd, 'wb') as f:
expiry = self.get_backend_timeout(timeout)
f.write(pickle.dumps(expiry, -1))
f.write(zlib.compress(pickle.dumps(value), -1))
file_move_safe(tmp_path, fname, allow_overwrite=True)
renamed = True
finally:
if not renamed:
os.remove(tmp_path)
def delete(self, key, version=None):
self._delete(self._key_to_file(key, version))
def _delete(self, fname):
if not fname.startswith(self._dir) or not os.path.exists(fname):
return
try:
os.remove(fname)
except OSError as e:
# ENOENT can happen if the cache file is removed (by another
# process) after the os.path.exists check.
if e.errno != errno.ENOENT:
raise
def has_key(self, key, version=None):
fname = self._key_to_file(key, version)
if os.path.exists(fname):
with io.open(fname, 'rb') as f:
return not self._is_expired(f)
return False
def _cull(self):
"""
Removes random cache entries if max_entries is reached at a ratio
of num_entries / cull_frequency. A value of 0 for CULL_FREQUENCY means
that the entire cache will be purged.
"""
filelist = self._list_cache_files()
num_entries = len(filelist)
if num_entries < self._max_entries:
return # return early if no culling is required
if self._cull_frequency == 0:
return self.clear() # Clear the cache when CULL_FREQUENCY = 0
# Delete a random selection of entries
filelist = random.sample(filelist,
int(num_entries / self._cull_frequency))
for fname in filelist:
self._delete(fname)
def _createdir(self):
    """Create the cache directory if it does not exist yet.

    Raises EnvironmentError if the directory cannot be created for any
    reason other than already existing (a concurrent process may win
    the race to create it, which is fine).
    """
    if not os.path.exists(self._dir):
        try:
            # 0o700: the cache holds pickled data; keep it private.
            os.makedirs(self._dir, 0o700)
        except OSError as e:
            if e.errno != errno.EEXIST:
                # Fixed: the original message ended with a stray
                # apostrophe ("could not be created'").
                raise EnvironmentError(
                    "Cache directory '%s' does not exist "
                    "and could not be created" % self._dir)
def _key_to_file(self, key, version=None):
    """
    Map ``key`` to its on-disk path:
    the cache root joined with the md5 of the key plus ``cache_suffix``.
    """
    key = self.make_key(key, version=version)
    self.validate_key(key)
    digest = hashlib.md5(force_bytes(key)).hexdigest()
    return os.path.join(self._dir, digest + self.cache_suffix)
def clear(self):
    """Delete every cache file under the cache directory."""
    if os.path.exists(self._dir):
        for path in self._list_cache_files():
            self._delete(path)
def _is_expired(self, f):
    """Return True (and delete the file) if open cache file ``f`` is stale.

    ``f`` must be positioned at the start of the file, where the expiry
    timestamp is stored as the first pickle.
    """
    exp = pickle.load(f)
    if exp is None or exp >= time.time():
        return False
    # On Windows a file has to be closed before deleting.
    f.close()
    self._delete(f.name)
    return True
def _list_cache_files(self):
    """Return the paths of all files in the cache dir with our suffix."""
    if not os.path.exists(self._dir):
        return []
    pattern = '*%s' % self.cache_suffix
    return [os.path.join(self._dir, name)
            for name in glob.glob1(self._dir, pattern)]
|
costadorione/purestream | refs/heads/master | lib/requests/packages/chardet/sjisprober.py | 1776 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
    """Prober for the Shift_JIS (Japanese) encoding.

    Combines a byte-level coding state machine with character
    distribution and context analyses.
    """
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(SJISSMModel)
        self._mDistributionAnalyzer = SJISDistributionAnalysis()
        # The context analyzer is specific to this prober; the base
        # class only manages the state machine and distribution analyzer.
        self._mContextAnalyzer = SJISContextAnalysis()
        self.reset()

    def reset(self):
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()

    def get_charset_name(self):
        # The reported charset name is delegated to the context analyzer.
        return self._mContextAnalyzer.get_charset_name()

    def feed(self, aBuf):
        """Feed a chunk of bytes; return the resulting detection state."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # The first byte of this buffer completes a character
                    # started in the previous chunk (held in _mLastChar).
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
                                                charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
                                                     - charLen], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)
        # Remember the final byte in case a multi-byte character spans
        # into the next chunk.
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                # Enough evidence accumulated -- shortcut to a positive.
                self._mState = constants.eFoundIt
        return self.get_state()

    def get_confidence(self):
        # Report the more confident of the two analyses.
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
|
AnishShah/tensorflow | refs/heads/master | tensorflow/python/platform/app_test.py | 201 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for our flags implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
# Defined so that app.run() has a flag to parse; main() below asserts
# that only the non-flag arguments reach it.
flags.DEFINE_boolean('myflag', False, '')
def main(argv):
    """Exit with status -1 unless argv is [prog, "--passthrough", "extra"]."""
    if len(argv) != 3:
        print("Length of argv was not 3: ", argv)
        sys.exit(-1)

    # Each check pairs an expectation with the message printed on failure.
    checks = (
        (argv[1] == "--passthrough", "--passthrough argument not in argv"),
        (argv[2] == "extra", "'extra' argument not in argv"),
    )
    for ok, message in checks:
        if not ok:
            print(message)
            sys.exit(-1)
if __name__ == '__main__':
    # Simulate command-line input: main() asserts that exactly the two
    # non-flag arguments below survive flag parsing by app.run().
    sys.argv.extend(["--myflag", "--passthrough", "extra"])
    app.run()
|
dhuang/incubator-airflow | refs/heads/master | airflow/utils/state.py | 3 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
from builtins import object
class State(object):
    """
    Static class with task instance states constants and color method to
    avoid hardcoding.
    """

    # States set by the scheduler.
    NONE = None
    REMOVED = "removed"
    SCHEDULED = "scheduled"

    # set by the executor (t.b.d.)
    # LAUNCHED = "launched"

    # States set by a task.
    QUEUED = "queued"
    RUNNING = "running"
    SUCCESS = "success"
    SHUTDOWN = "shutdown"  # External request to shut down
    FAILED = "failed"
    UP_FOR_RETRY = "up_for_retry"
    UPSTREAM_FAILED = "upstream_failed"
    SKIPPED = "skipped"

    # All states a task instance may be in.
    task_states = (
        SUCCESS,
        RUNNING,
        FAILED,
        UPSTREAM_FAILED,
        UP_FOR_RETRY,
        QUEUED,
        NONE,
        SCHEDULED,
    )

    # All states a DAG run may be in.
    dag_states = (
        SUCCESS,
        RUNNING,
        FAILED,
    )

    # Background color used when rendering each state in the UI.
    state_color = {
        QUEUED: 'gray',
        RUNNING: 'lime',
        SUCCESS: 'green',
        SHUTDOWN: 'blue',
        FAILED: 'red',
        UP_FOR_RETRY: 'gold',
        UPSTREAM_FAILED: 'orange',
        SKIPPED: 'pink',
        REMOVED: 'lightgrey',
        SCHEDULED: 'tan',
        NONE: 'lightblue',
    }

    @classmethod
    def color(cls, state):
        """Return the display color for ``state`` ('white' if unknown)."""
        return cls.state_color.get(state, 'white')

    @classmethod
    def color_fg(cls, state):
        """Return a readable foreground color for ``state``'s background."""
        return 'white' if cls.color(state) in ('green', 'red') else 'black'

    @classmethod
    def finished(cls):
        """
        A list of states indicating that a task started and completed a
        run attempt. Note that the attempt could have resulted in failure or
        have been interrupted; in any case, it is no longer running.
        """
        return [
            cls.SUCCESS,
            cls.SHUTDOWN,
            cls.FAILED,
            cls.SKIPPED,
        ]

    @classmethod
    def unfinished(cls):
        """
        A list of states indicating that a task either has not completed
        a run or has not even started.
        """
        return [
            cls.NONE,
            cls.SCHEDULED,
            cls.QUEUED,
            cls.RUNNING,
            cls.UP_FOR_RETRY,
        ]
|
amenonsen/ansible | refs/heads/devel | test/units/modules/network/f5/test_bigip_remote_role.py | 22 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_remote_role import ApiParameters
from library.modules.bigip_remote_role import ModuleParameters
from library.modules.bigip_remote_role import ModuleManager
from library.modules.bigip_remote_role import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_remote_role import ApiParameters
from ansible.modules.network.f5.bigip_remote_role import ModuleParameters
from ansible.modules.network.f5.bigip_remote_role import ModuleManager
from ansible.modules.network.f5.bigip_remote_role import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Load (and memoize) a fixture file, parsed as JSON when possible."""
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]

    with open(path) as f:
        raw = f.read()

    # Fall back to the raw text for fixtures that are not valid JSON.
    try:
        parsed = json.loads(raw)
    except Exception:
        parsed = raw

    fixture_data[path] = parsed
    return parsed
class TestParameters(unittest.TestCase):
    """Unit tests for the module's parameter-adapter classes."""

    def test_module_parameters(self):
        # Playbook value 'none' is translated to the API value 'disable'.
        args = dict(
            terminal_access='none',
        )

        p = ModuleParameters(params=args)
        assert p.terminal_access == 'disable'

    def test_api_parameters(self):
        # The fixture holds a recorded remote-role API response.
        args = load_fixture('load_auth_remote_role_role_info_1.json')

        p = ApiParameters(params=args)
        assert p.terminal_access == 'disable'
class TestManager(unittest.TestCase):
    """Tests for ModuleManager with device access mocked out."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create_remote_syslog(self, *args):
        # NOTE(review): the method name says "remote syslog" but this
        # file tests bigip_remote_role -- presumably copied from a
        # sibling test; what is exercised here is resource creation.
        set_module_args(dict(
            name='foo',
            line_order=1000,
            attribute_string='bar',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )

        # Override methods in the specific type of manager:
        # exists() first reports absent (triggering create), then present.
        mm = ModuleManager(module=module)
        mm.exists = Mock(side_effect=[False, True])
        mm.create_on_device = Mock(return_value=True)

        results = mm.exec_module()

        assert results['changed'] is True
|
kernc/scikit-learn | refs/heads/master | sklearn/neighbors/nearest_centroid.py | 34 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
from ..utils.multiclass import check_classification_targets
class NearestCentroid(BaseEstimator, ClassifierMixin):
    """Nearest centroid classifier.

    Each class is represented by its centroid, with test samples classified to
    the class with the nearest centroid.

    Read more in the :ref:`User Guide <nearest_centroid_classifier>`.

    Parameters
    ----------
    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
        metric parameter.
        The centroids for the samples corresponding to each class is the point
        from which the sum of the distances (according to the metric) of all
        samples that belong to that particular class are minimized.
        If the "manhattan" metric is provided, this centroid is the median and
        for all other metrics, the centroid is now set to be the mean.

    shrink_threshold : float, optional (default = None)
        Threshold for shrinking centroids to remove features.

    Attributes
    ----------
    centroids_ : array-like, shape = [n_classes, n_features]
        Centroid of each class

    Examples
    --------
    >>> from sklearn.neighbors.nearest_centroid import NearestCentroid
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> clf = NearestCentroid()
    >>> clf.fit(X, y)
    NearestCentroid(metric='euclidean', shrink_threshold=None)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier

    Notes
    -----
    When used for text classification with tf-idf vectors, this classifier is
    also known as the Rocchio classifier.

    References
    ----------
    Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
    multiple cancer types by shrunken centroids of gene expression. Proceedings
    of the National Academy of Sciences of the United States of America,
    99(10), 6567-6572. The National Academy of Sciences.
    """

    def __init__(self, metric='euclidean', shrink_threshold=None):
        self.metric = metric
        self.shrink_threshold = shrink_threshold

    def fit(self, X, y):
        """
        Fit the NearestCentroid model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
            Note that centroid shrinking cannot be used with sparse matrices.
        y : array, shape = [n_samples]
            Target values (integers)
        """
        # If X is sparse and the metric is "manhattan", store it in a csc
        # format is easier to calculate the median.
        if self.metric == 'manhattan':
            X, y = check_X_y(X, y, ['csc'])
        else:
            X, y = check_X_y(X, y, ['csr', 'csc'])
        is_X_sparse = sp.issparse(X)
        if is_X_sparse and self.shrink_threshold:
            raise ValueError("threshold shrinking not supported"
                             " for sparse input")
        check_classification_targets(y)

        n_samples, n_features = X.shape
        le = LabelEncoder()
        y_ind = le.fit_transform(y)
        self.classes_ = classes = le.classes_
        n_classes = classes.size
        if n_classes < 2:
            raise ValueError('y has less than 2 classes')

        # One centroid row per class.
        self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
        # Number of samples in each class (used by the shrinkage formula).
        nk = np.zeros(n_classes)

        for cur_class in range(n_classes):
            # Boolean mask selecting this class's members.
            center_mask = y_ind == cur_class
            nk[cur_class] = np.sum(center_mask)
            if is_X_sparse:
                center_mask = np.where(center_mask)[0]

            # XXX: Update other averaging methods according to the metrics.
            if self.metric == "manhattan":
                # NumPy does not calculate median of sparse matrices.
                if not is_X_sparse:
                    self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
                else:
                    self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
            else:
                if self.metric != 'euclidean':
                    warnings.warn("Averaging for metrics other than "
                                  "euclidean and manhattan not supported. "
                                  "The average is set to be the mean."
                                  )
                self.centroids_[cur_class] = X[center_mask].mean(axis=0)

        if self.shrink_threshold:
            # Shrunken-centroid correction (Tibshirani et al., 2002).
            dataset_centroid_ = np.mean(X, axis=0)

            # m parameter for determining deviation
            m = np.sqrt((1. / nk) + (1. / n_samples))
            # Calculate deviation using the standard deviation of centroids.
            variance = (X - self.centroids_[y_ind]) ** 2
            variance = variance.sum(axis=0)
            s = np.sqrt(variance / (n_samples - n_classes))
            s += np.median(s)  # To deter outliers from affecting the results.
            mm = m.reshape(len(m), 1)  # Reshape to allow broadcasting.
            ms = mm * s
            deviation = ((self.centroids_ - dataset_centroid_) / ms)
            # Soft thresholding: if the deviation crosses 0 during shrinking,
            # it becomes zero.
            signs = np.sign(deviation)
            deviation = (np.abs(deviation) - self.shrink_threshold)
            deviation[deviation < 0] = 0
            deviation *= signs
            # Now adjust the centroids using the deviation
            msd = ms * deviation
            self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
        return self

    def predict(self, X):
        """Perform classification on an array of test vectors X.

        The predicted class C for each sample in X is returned.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples]

        Notes
        -----
        If the metric constructor parameter is "precomputed", X is assumed to
        be the distance matrix between the data to be predicted and
        ``self.centroids_``.
        """
        check_is_fitted(self, 'centroids_')

        X = check_array(X, accept_sparse='csr')
        # Assign each sample the class of its nearest centroid.
        return self.classes_[pairwise_distances(
            X, self.centroids_, metric=self.metric).argmin(axis=1)]
|
yacneyac/gevent-socketio | refs/heads/master | examples/django_chat/chat/views.py | 11 | from socketio import socketio_manage
from django.http import HttpResponse
from django.contrib.auth.decorators import user_passes_test
from django.shortcuts import get_object_or_404, render, redirect
from chat.models import ChatRoom
from chat.sockets import ChatNamespace
def rooms(request, template="rooms.html"):
    """
    Homepage - lists all rooms.
    """
    return render(request, template, {"rooms": ChatRoom.objects.all()})
def room(request, slug, template="room.html"):
    """
    Show a single room looked up by slug (404 when it doesn't exist).
    """
    chat_room = get_object_or_404(ChatRoom, slug=slug)
    return render(request, template, {"room": chat_room})
def create(request):
    """
    Handles post from the "Add room" form on the homepage: redirect to
    the (possibly newly created) room, or back to the list when no name
    was submitted.
    """
    name = request.POST.get("name")
    if not name:
        return redirect(rooms)
    chat_room, _created = ChatRoom.objects.get_or_create(name=name)
    return redirect(chat_room)
|
harshilasu/GraphicMelon | refs/heads/master | y/google-cloud-sdk/lib/requests/packages/urllib3/contrib/ntlmpool.py | 714 | # urllib3/contrib/ntlmpool.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
    """
    Implements an NTLM authentication version of an urllib3 connection pool
    """

    scheme = 'https'

    def __init__(self, user, pw, authurl, *args, **kwargs):
        """
        authurl is a random URL on the server that is protected by NTLM.
        user is the Windows user, probably in the DOMAIN\\username format.
        pw is the password for the user.
        """
        super(NTLMConnectionPool, self).__init__(*args, **kwargs)
        self.authurl = authurl
        self.rawuser = user
        # Split "DOMAIN\username" into its two parts; the domain is
        # upper-cased as required by the NTLM messages.
        user_parts = user.split('\\', 1)
        self.domain = user_parts[0].upper()
        self.user = user_parts[1]
        self.pw = pw

    def _new_conn(self):
        # Performs the NTLM handshake that secures the connection. The socket
        # must be kept open while requests are performed.
        self.num_connections += 1
        log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
                  (self.num_connections, self.host, self.authurl))

        headers = {}
        headers['Connection'] = 'Keep-Alive'
        req_header = 'Authorization'
        resp_header = 'www-authenticate'

        conn = HTTPSConnection(host=self.host, port=self.port)

        # Send negotiation message (NTLM handshake step 1 of 3).
        headers[req_header] = (
            'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
        log.debug('Request headers: %s' % headers)
        conn.request('GET', self.authurl, None, headers)
        res = conn.getresponse()
        reshdr = dict(res.getheaders())
        log.debug('Response status: %s %s' % (res.status, res.reason))
        log.debug('Response headers: %s' % reshdr)
        log.debug('Response data: %s [...]' % res.read(100))

        # Remove the reference to the socket, so that it can not be closed by
        # the response object (we want to keep the socket open)
        res.fp = None

        # Server should respond with a challenge message (step 2 of 3).
        auth_header_values = reshdr[resp_header].split(', ')
        auth_header_value = None
        for s in auth_header_values:
            if s[:5] == 'NTLM ':
                auth_header_value = s[5:]

        if auth_header_value is None:
            raise Exception('Unexpected %s response header: %s' %
                            (resp_header, reshdr[resp_header]))

        # Send authentication message (step 3 of 3).
        ServerChallenge, NegotiateFlags = \
            ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
        auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
                                                         self.user,
                                                         self.domain,
                                                         self.pw,
                                                         NegotiateFlags)
        headers[req_header] = 'NTLM %s' % auth_msg
        log.debug('Request headers: %s' % headers)
        conn.request('GET', self.authurl, None, headers)
        res = conn.getresponse()
        log.debug('Response status: %s %s' % (res.status, res.reason))
        log.debug('Response headers: %s' % dict(res.getheaders()))
        log.debug('Response data: %s [...]' % res.read()[:100])
        if res.status != 200:
            if res.status == 401:
                raise Exception('Server rejected request: wrong '
                                'username or password')
            raise Exception('Wrong server response: %s %s' %
                            (res.status, res.reason))

        # Again, keep the underlying socket alive for reuse.
        res.fp = None
        log.debug('Connection established')
        return conn

    def urlopen(self, method, url, body=None, headers=None, retries=3,
                redirect=True, assert_same_host=True):
        # Force keep-alive so the NTLM-authenticated socket is reused.
        if headers is None:
            headers = {}
        headers['Connection'] = 'Keep-Alive'
        return super(NTLMConnectionPool, self).urlopen(method, url, body,
                                                       headers, retries,
                                                       redirect,
                                                       assert_same_host)
|
ThinkOpen-Solutions/odoo | refs/heads/stable | addons/l10n_si/__init__.py | 439 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright: (C) 2012 - Mentis d.o.o., Dravograd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_wizard |
krzysztof/invenio-openaire | refs/heads/master | invenio_openaire/fetchers.py | 1 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""PID fetchers for grants and funders."""
from __future__ import absolute_import, print_function
from invenio_pidstore.fetchers import FetchedPID
def funder_fetcher(record_uuid, data):
    """Fetch PID from funder record (keyed by the funder's DOI)."""
    doi = str(data['doi'])
    return FetchedPID(provider=None, pid_type='frdoi', pid_value=doi)
def grant_fetcher(record_uuid, data):
    """Fetch PID from grant record (keyed by its internal identifier)."""
    internal_id = str(data['internal_id'])
    return FetchedPID(provider=None, pid_type='grant', pid_value=internal_id)
|
tectronics/mythbox | refs/heads/master | resources/lib/simplejson/simplejson/tests/test_check_circular.py | 414 | from unittest import TestCase
import simplejson as json
def default_iterable(obj):
    """``json.dumps`` default hook: serialize any iterable as a list."""
    return [item for item in obj]
class TestCheckCircular(TestCase):
    """Exercise json.dumps() circular-reference detection."""

    def test_circular_dict(self):
        # A dict containing itself as a value must be rejected.
        selfref = {}
        selfref['a'] = selfref
        self.assertRaises(ValueError, json.dumps, selfref)

    def test_circular_list(self):
        # A list containing itself must be rejected.
        looped = []
        looped.append(looped)
        self.assertRaises(ValueError, json.dumps, looped)

    def test_circular_composite(self):
        # A cycle through two containers must also be detected.
        outer = {'a': []}
        outer['a'].append(outer)
        self.assertRaises(ValueError, json.dumps, outer)

    def test_circular_default(self):
        # With a default hook, sets serialize; without one they raise.
        json.dumps([set()], default=default_iterable)
        self.assertRaises(TypeError, json.dumps, [set()])

    def test_circular_off_default(self):
        # Same as above with circular checking disabled.
        json.dumps([set()], default=default_iterable, check_circular=False)
        self.assertRaises(TypeError, json.dumps, [set()], check_circular=False)
|
jesseditson/rethinkdb | refs/heads/next | scripts/compile-web-assets.py | 34 | #!/usr/bin/env python
from __future__ import print_function, division
import os
import sys
import itertools
def main():
    """Validate argv, then emit the encoded asset bundle on stdout."""
    try:
        assets_root = sys.argv[1]
        assert os.path.isdir(assets_root)
    except:
        print("Error: First argument must be a directory", file=sys.stderr)
        sys.exit(1)

    # Collect every file under assets_root, expressed relative to it.
    assets = []
    for root, __, paths in os.walk(assets_root):
        for path in paths:
            full = os.path.join(root, path)
            assets.append(os.path.relpath(full, assets_root))

    # Write the encoded files and an index to stdout
    print(prelude)
    write_assets(assets_root, assets)
# C++ boilerplate printed ahead of the generated asset table.
prelude = """
// Generated by scripts/compile-web-assets.py
#include <map>
#include <string>
"""
def write_assets(asset_root, assets):
    """Print a C++ std::map literal mapping "/path" keys to file contents.

    Each file's bytes are emitted as escaped C string fragments kept to
    short lines, followed by the file's byte length.
    """
    print('std::map<std::string, const std::string> static_web_assets = {')
    for i, asset in enumerate(assets):
        print(' { ' + encode('/' + asset) + ', {', end='')
        data = open(os.path.join(asset_root, asset), "rb").read()
        position = 0  # track the position to keep lines short
        trigraph = 0  # track consecutive question marks to avoid writing trigraphs
        prev_e = None  # track the previous character to avoid tacking on hex digits
        for c in data:
            c = byte(c)
            if position == 0:
                # start a new line
                print('\n "', end='')
                position = 7
                trigraph = 0
                prev_e = None
            if trigraph >= 2 and c in b"=/'()!<>-":
                # split a trigraph: "??" followed by one of these chars
                # would otherwise be rewritten by the C preprocessor
                print('" "', end='')
                prev_e = None
                position += 3
                trigraph = 0
            elif c == b'?':
                # count the amount of question marks
                trigraph += 1
            else:
                trigraph = 0
            e = encode_char(c, prev_e)
            prev_e = e
            print(e, end='')
            position += len(e)
            if position > 82 or c == b'\n':
                # end a line if it gets too long and on newlines
                print('"', end='')
                position = 0
        if position != 0:
            print('"', end='')
        if not data:
            # An empty file still needs an (empty) string literal.
            print('""', end='')
        print(',')
        print(' ' + str(len(data)) + ' } },')
    print('};')
def encode(string):
    """Encode a unicode string as a quoted C string literal."""
    escaped = [encode_char(byte(c)) for c in string.encode('utf-8')]
    return '"' + ''.join(escaped) + '"'
# Bytes with dedicated C escape sequences (plus quote and backslash),
# mapped to the escape text emitted by encode_char().
c_escapes = {
    b'\a': '\\a',
    b'\b': '\\b',
    b'\f': '\\f',
    b'\n': '\\n',
    b'\r': '\\r',
    b'\t': '\\t',
    b'\v': '\\v',
    b'\\': '\\\\',
    b'"': '\\"',
}
# The input character c should be a single character bytes
# The return value is an ascii-compatible unicode string
def encode_char(c, previous=None):
    """Encode one byte for inclusion in a C string literal.

    ``previous`` is the previously emitted escape text; it is consulted
    so a literal hex digit is never emitted directly after a hex or
    octal escape (C would absorb it into that escape).
    """
    after_numeric_escape = bool(
        previous and previous[0] == '\\' and previous[1] in 'x01234567')
    code = ord(c)
    if c in c_escapes:
        return c_escapes[c]
    if code < 8:
        return '\\' + str(code)
    if 32 <= code < 127 and not (
            after_numeric_escape and c in b'01234567890abcdefABCDEF'):
        return c.decode('ascii')
    return '\\x%x' % code
# ``byte`` normalizes one element of a bytes iteration to a length-1
# bytes object: iterating bytes yields 1-char strings in Python 2 but
# ints in Python 3.  Compare sys.version_info rather than the
# ``sys.version`` string -- lexicographic string comparison of version
# strings is unreliable in general.
if sys.version_info[0] < 3:
    def byte(b):
        """Python 2: bytes iteration already yields 1-char strings."""
        return b
else:
    def byte(b):
        """Python 3: wrap the int back into a length-1 bytes object."""
        return bytes([b])
# Script entry point.
if __name__ == "__main__":
    main()
|
makinacorpus/rdiff-backup | refs/heads/master | rdiff_backup/compilec.py | 4 | #!/usr/bin/env python
import sys, os
from distutils.core import setup, Extension
# This script takes no arguments; it drives distutils itself by faking
# a "build" command on its own command line.
assert len(sys.argv) == 1
sys.argv.append("build")

# Build rdiff-backup's two C extension modules into ./build.
setup(name="CModule",
      version="0.9.0",
      description="rdiff-backup's C component",
      ext_modules=[Extension("C", ["cmodule.c"]),
                   Extension("_librsync", ["_librsyncmodule.c"],
                             libraries=["rsync"])])
def get_libraries():
    """Return filename of C.so and _librsync.so files"""
    # NOTE(review): Python 2 only -- uses the print statement below and
    # relies on filter() returning a list.
    build_files = os.listdir("build")
    lib_dirs = filter(lambda x: x.startswith("lib"), build_files)
    assert len(lib_dirs) == 1, "No library directory or too many"
    libdir = lib_dirs[0]
    # Windows/cygwin builds produce DLLs instead of .so files.
    if sys.platform == "cygwin" or os.name == "nt": libext = "dll"
    else: libext = "so"
    clib = os.path.join("build", libdir, "C." + libext)
    rsynclib = os.path.join("build", libdir, "_librsync." + libext)
    # Verify both libraries were actually built before returning them.
    try:
        os.lstat(clib)
        os.lstat(rsynclib)
    except os.error:
        print "Library file missing"
        sys.exit(1)
    return clib, rsynclib
# Move the freshly built libraries into the current directory and
# discard the distutils build tree.
for filename in get_libraries():
    assert not os.system("mv '%s' ." % (filename,))
assert not os.system("rm -rf build")
|
eunchong/build | refs/heads/master | third_party/cherrypy/_cpdispatch.py | 82 | """CherryPy dispatchers.
A 'dispatcher' is the object which looks up the 'page handler' callable
and collects config for the current request based on the path_info, other
request attributes, and the application architecture. The core calls the
dispatcher as early as possible, passing it a 'path_info' argument.
The default dispatcher discovers the page handler by matching path_info
to a hierarchical arrangement of objects, starting at request.app.root.
"""
import string
import sys
import types
try:
    # Python 2: old-style classes have their own type (types.ClassType).
    classtype = (type, types.ClassType)
except AttributeError:
    # Python 3: there is only ``type``.
    classtype = type
import cherrypy
from cherrypy._cpcompat import set
class PageHandler(object):
    """Callable which sets response.body."""

    def __init__(self, callable, *args, **kwargs):
        # The page handler plus the positional and keyword arguments
        # the dispatcher collected for it.
        self.callable = callable
        self.args = args
        self.kwargs = kwargs

    def __call__(self):
        try:
            return self.callable(*self.args, **self.kwargs)
        except TypeError:
            # The TypeError may mean the request supplied arguments
            # that don't fit the handler's signature.  Re-inspect the
            # call: if test_callable_spec raises an HTTPError, surface
            # that to the client; if it raises anything else, restore
            # the original TypeError; if it passes, the TypeError came
            # from inside the handler, so re-raise it unchanged.
            x = sys.exc_info()[1]
            try:
                test_callable_spec(self.callable, self.args, self.kwargs)
            except cherrypy.HTTPError:
                raise sys.exc_info()[1]
            except:
                raise x
            raise
def test_callable_spec(callable, callable_args, callable_kwargs):
    """
    Inspect callable and test to see if the given args are suitable for it.
    When an error occurs during the handler's invoking stage there are 2
    erroneous cases:
    1. Too many parameters passed to a function which doesn't define
       one of *args or **kwargs.
    2. Too little parameters are passed to the function.
    There are 3 sources of parameters to a cherrypy handler.
    1. query string parameters are passed as keyword parameters to the handler.
    2. body parameters are also passed as keyword parameters.
    3. when partial matching occurs, the final path atoms are passed as
       positional args.
    Both the query string and path atoms are part of the URI. If they are
    incorrect, then a 404 Not Found should be raised. Conversely the body
    parameters are part of the request; if they are invalid a 400 Bad Request.
    """
    show_mismatched_params = getattr(
        cherrypy.serving.request, 'show_mismatched_params', False)
    try:
        (args, varargs, varkw, defaults) = inspect.getargspec(callable)
    except TypeError:
        # getargspec rejects non-function callables; retry on __call__
        # for callable instances.
        if isinstance(callable, object) and hasattr(callable, '__call__'):
            (args, varargs, varkw, defaults) = inspect.getargspec(callable.__call__)
        else:
            # If it wasn't one of our own types, re-raise
            # the original error
            raise
    if args and args[0] == 'self':
        args = args[1:]
    # Count how many times each named parameter is satisfied; usage of 0
    # means "missing", >1 means "supplied more than once".
    arg_usage = dict([(arg, 0,) for arg in args])
    vararg_usage = 0
    varkw_usage = 0
    extra_kwargs = set()
    for i, value in enumerate(callable_args):
        try:
            arg_usage[args[i]] += 1
        except IndexError:
            # More positionals than named parameters: consumed by *args.
            vararg_usage += 1
    for key in callable_kwargs.keys():
        try:
            arg_usage[key] += 1
        except KeyError:
            # Unknown keyword: consumed by **kwargs (if the handler has one).
            varkw_usage += 1
            extra_kwargs.add(key)
    # figure out which args have defaults.
    args_with_defaults = args[-len(defaults or []):]
    for i, val in enumerate(defaults or []):
        # Defaults take effect only when the arg hasn't been used yet.
        if arg_usage[args_with_defaults[i]] == 0:
            arg_usage[args_with_defaults[i]] += 1
    missing_args = []
    multiple_args = []
    for key, usage in arg_usage.items():
        if usage == 0:
            missing_args.append(key)
        elif usage > 1:
            multiple_args.append(key)
    if missing_args:
        # In the case where the method allows body arguments
        # there are 3 potential errors:
        # 1. not enough query string parameters -> 404
        # 2. not enough body parameters -> 400
        # 3. not enough path parts (partial matches) -> 404
        #
        # We can't actually tell which case it is,
        # so I'm raising a 404 because that covers 2/3 of the
        # possibilities
        #
        # In the case where the method does not allow body
        # arguments it's definitely a 404.
        message = None
        if show_mismatched_params:
            message="Missing parameters: %s" % ",".join(missing_args)
        raise cherrypy.HTTPError(404, message=message)
    # the extra positional arguments come from the path - 404 Not Found
    if not varargs and vararg_usage > 0:
        raise cherrypy.HTTPError(404)
    body_params = cherrypy.serving.request.body.params or {}
    body_params = set(body_params.keys())
    qs_params = set(callable_kwargs.keys()) - body_params
    if multiple_args:
        if qs_params.intersection(set(multiple_args)):
            # If any of the multiple parameters came from the query string then
            # it's a 404 Not Found
            error = 404
        else:
            # Otherwise it's a 400 Bad Request
            error = 400
        message = None
        if show_mismatched_params:
            message="Multiple values for parameters: "\
                    "%s" % ",".join(multiple_args)
        raise cherrypy.HTTPError(error, message=message)
    if not varkw and varkw_usage > 0:
        # If there were extra query string parameters, it's a 404 Not Found
        extra_qs_params = set(qs_params).intersection(extra_kwargs)
        if extra_qs_params:
            message = None
            if show_mismatched_params:
                message="Unexpected query string "\
                        "parameters: %s" % ", ".join(extra_qs_params)
            raise cherrypy.HTTPError(404, message=message)
        # If there were any extra body parameters, it's a 400 Bad Request
        extra_body_params = set(body_params).intersection(extra_kwargs)
        if extra_body_params:
            message = None
            if show_mismatched_params:
                message="Unexpected body parameters: "\
                        "%s" % ", ".join(extra_body_params)
            raise cherrypy.HTTPError(400, message=message)
try:
    import inspect
except ImportError:
    # Without inspect we cannot analyze handler signatures, so parameter
    # checking degrades to a no-op.  This block is deliberately placed
    # after the real test_callable_spec so it can overwrite it.
    test_callable_spec = lambda callable, args, kwargs: None
class LateParamPageHandler(PageHandler):
    """When passing cherrypy.request.params to the page handler, we do not
    want to capture that dict too early; we want to give tools like the
    decoding tool a chance to modify the params dict in-between the lookup
    of the handler and the actual calling of the handler. This subclass
    takes that into account, and allows request.params to be 'bound late'
    (it's more complicated than that, but that's the effect).
    """
    def _get_kwargs(self):
        # Merge the (possibly tool-modified) request params with any
        # kwargs set explicitly on this handler; explicit kwargs win.
        kwargs = cherrypy.serving.request.params.copy()
        if self._kwargs:
            kwargs.update(self._kwargs)
        return kwargs
    def _set_kwargs(self, kwargs):
        # Stash explicit kwargs; they are merged in lazily by _get_kwargs.
        self._kwargs = kwargs
    kwargs = property(_get_kwargs, _set_kwargs,
                      doc='page handler kwargs (with '
                      'cherrypy.request.params copied in)')
if sys.version_info < (3, 0):
    # Python 2: str.translate consumes a 256-character mapping table.
    punctuation_to_underscores = string.maketrans(
        string.punctuation, '_' * len(string.punctuation))
    def validate_translator(t):
        # Reject anything that str.translate would not accept on Python 2.
        if not isinstance(t, str) or len(t) != 256:
            raise ValueError("The translate argument must be a str of len 256.")
else:
    # Python 3: str.translate consumes a {codepoint: replacement} dict.
    punctuation_to_underscores = str.maketrans(
        string.punctuation, '_' * len(string.punctuation))
    def validate_translator(t):
        # Reject anything that str.translate would not accept on Python 3.
        if not isinstance(t, dict):
            raise ValueError("The translate argument must be a dict.")
class Dispatcher(object):
    """CherryPy Dispatcher which walks a tree of objects to find a handler.
    The tree is rooted at cherrypy.request.app.root, and each hierarchical
    component in the path_info argument is matched to a corresponding nested
    attribute of the root object. Matching handlers must have an 'exposed'
    attribute which evaluates to True. The special method name "index"
    matches a URI which ends in a slash ("/"). The special method name
    "default" may match a portion of the path_info (but only when no longer
    substring of the path_info matches some other object).
    This is the default, built-in dispatcher for CherryPy.
    """
    dispatch_method_name = '_cp_dispatch'
    """
    The name of the dispatch method that nodes may optionally implement
    to provide their own dynamic dispatch algorithm.
    """
    def __init__(self, dispatch_method_name=None,
                 translate=punctuation_to_underscores):
        # `translate` maps URI characters onto legal Python identifier
        # characters (by default, punctuation becomes '_').
        validate_translator(translate)
        self.translate = translate
        if dispatch_method_name:
            self.dispatch_method_name = dispatch_method_name
    def __call__(self, path_info):
        """Set handler and config for the current request."""
        request = cherrypy.serving.request
        func, vpath = self.find_handler(path_info)
        if func:
            # Decode any leftover %2F in the virtual_path atoms.
            vpath = [x.replace("%2F", "/") for x in vpath]
            request.handler = LateParamPageHandler(func, *vpath)
        else:
            request.handler = cherrypy.NotFound()
    def find_handler(self, path):
        """Return the appropriate page handler, plus any virtual path.
        This will return two objects. The first will be a callable,
        which can be used to generate page output. Any parameters from
        the query string or request body will be sent to that callable
        as keyword arguments.
        The callable is found by traversing the application's tree,
        starting from cherrypy.request.app.root, and matching path
        components to successive objects in the tree. For example, the
        URL "/path/to/handler" might return root.path.to.handler.
        The second object returned will be a list of names which are
        'virtual path' components: parts of the URL which are dynamic,
        and were not used when looking up the handler.
        These virtual path components are passed to the handler as
        positional arguments.
        """
        request = cherrypy.serving.request
        app = request.app
        root = app.root
        dispatch_name = self.dispatch_method_name
        # Get config for the root object/path.
        fullpath = [x for x in path.strip('/').split('/') if x] + ['index']
        fullpath_len = len(fullpath)
        # segleft = number of path segments not yet consumed.
        segleft = fullpath_len
        nodeconf = {}
        if hasattr(root, "_cp_config"):
            nodeconf.update(root._cp_config)
        if "/" in app.config:
            nodeconf.update(app.config["/"])
        # object_trail records every node visited, with its merged config
        # and how many segments remained when it was reached.
        object_trail = [['root', root, nodeconf, segleft]]
        node = root
        iternames = fullpath[:]
        while iternames:
            name = iternames[0]
            # map to legal Python identifiers (e.g. replace '.' with '_')
            objname = name.translate(self.translate)
            nodeconf = {}
            subnode = getattr(node, objname, None)
            pre_len = len(iternames)
            if subnode is None:
                # No matching attribute; give the node's _cp_dispatch
                # (if any, and not itself an exposed handler) a chance.
                dispatch = getattr(node, dispatch_name, None)
                if dispatch and hasattr(dispatch, '__call__') and not \
                        getattr(dispatch, 'exposed', False) and \
                        pre_len > 1:
                    #Don't expose the hidden 'index' token to _cp_dispatch
                    #We skip this if pre_len == 1 since it makes no sense
                    #to call a dispatcher when we have no tokens left.
                    index_name = iternames.pop()
                    subnode = dispatch(vpath=iternames)
                    iternames.append(index_name)
                else:
                    #We didn't find a path, but keep processing in case there
                    #is a default() handler.
                    iternames.pop(0)
            else:
                #We found the path, remove the vpath entry
                iternames.pop(0)
            segleft = len(iternames)
            if segleft > pre_len:
                #The dispatcher grew the vpath; that is never legal.
                raise cherrypy.CherryPyException(
                    "A vpath segment was added. Custom dispatchers may only "
                    + "remove elements. While trying to process "
                    + "{0} in {1}".format(name, fullpath)
                )
            elif segleft == pre_len:
                #Assume that the handler used the current path segment, but
                #did not pop it. This allows things like
                #return getattr(self, vpath[0], None)
                iternames.pop(0)
                segleft -= 1
            node = subnode
            if node is not None:
                # Get _cp_config attached to this node.
                if hasattr(node, "_cp_config"):
                    nodeconf.update(node._cp_config)
            # Mix in values from app.config for this path.
            existing_len = fullpath_len - pre_len
            if existing_len != 0:
                curpath = '/' + '/'.join(fullpath[0:existing_len])
            else:
                curpath = ''
            new_segs = fullpath[fullpath_len - pre_len:fullpath_len - segleft]
            for seg in new_segs:
                curpath += '/' + seg
                if curpath in app.config:
                    nodeconf.update(app.config[curpath])
            object_trail.append([name, node, nodeconf, segleft])
        def set_conf():
            """Collapse all object_trail config into cherrypy.request.config."""
            base = cherrypy.config.copy()
            # Note that we merge the config from each node
            # even if that node was None.
            for name, obj, conf, segleft in object_trail:
                base.update(conf)
                if 'tools.staticdir.dir' in conf:
                    base['tools.staticdir.section'] = '/' + '/'.join(fullpath[0:fullpath_len - segleft])
            return base
        # Try successive objects (reverse order)
        num_candidates = len(object_trail) - 1
        for i in range(num_candidates, -1, -1):
            name, candidate, nodeconf, segleft = object_trail[i]
            if candidate is None:
                continue
            # Try a "default" method on the current leaf.
            if hasattr(candidate, "default"):
                defhandler = candidate.default
                if getattr(defhandler, 'exposed', False):
                    # Insert any extra _cp_config from the default handler.
                    conf = getattr(defhandler, "_cp_config", {})
                    object_trail.insert(i+1, ["default", defhandler, conf, segleft])
                    request.config = set_conf()
                    # See http://www.cherrypy.org/ticket/613
                    request.is_index = path.endswith("/")
                    return defhandler, fullpath[fullpath_len - segleft:-1]
            # Uncomment the next line to restrict positional params to "default".
            # if i < num_candidates - 2: continue
            # Try the current leaf.
            if getattr(candidate, 'exposed', False):
                request.config = set_conf()
                if i == num_candidates:
                    # We found the extra ".index". Mark request so tools
                    # can redirect if path_info has no trailing slash.
                    request.is_index = True
                else:
                    # We're not at an 'index' handler. Mark request so tools
                    # can redirect if path_info has NO trailing slash.
                    # Note that this also includes handlers which take
                    # positional parameters (virtual paths).
                    request.is_index = False
                return candidate, fullpath[fullpath_len - segleft:-1]
        # We didn't find anything
        request.config = set_conf()
        return None, []
class MethodDispatcher(Dispatcher):
    """Additional dispatch based on cherrypy.request.method.upper().
    Methods named GET, POST, etc will be called on an exposed class.
    The method names must be all caps; the appropriate Allow header
    will be output showing all capitalized method names as allowable
    HTTP verbs.
    Note that the containing class must be exposed, not the methods.
    """
    def __call__(self, path_info):
        """Set handler and config for the current request."""
        request = cherrypy.serving.request
        resource, vpath = self.find_handler(path_info)
        if resource:
            # Set Allow header: every all-caps attribute is treated as a verb.
            avail = [m for m in dir(resource) if m.isupper()]
            if "GET" in avail and "HEAD" not in avail:
                avail.append("HEAD")
            avail.sort()
            cherrypy.serving.response.headers['Allow'] = ", ".join(avail)
            # Find the subhandler
            meth = request.method.upper()
            func = getattr(resource, meth, None)
            # HEAD falls back to GET (the body is discarded elsewhere).
            if func is None and meth == "HEAD":
                func = getattr(resource, "GET", None)
            if func:
                # Grab any _cp_config on the subhandler.
                if hasattr(func, "_cp_config"):
                    request.config.update(func._cp_config)
                # Decode any leftover %2F in the virtual_path atoms.
                vpath = [x.replace("%2F", "/") for x in vpath]
                request.handler = LateParamPageHandler(func, *vpath)
            else:
                # Resource exists but does not implement this verb.
                request.handler = cherrypy.HTTPError(405)
        else:
            request.handler = cherrypy.NotFound()
class RoutesDispatcher(object):
    """A Routes based dispatcher for CherryPy."""
    def __init__(self, full_result=False):
        """
        Routes dispatcher
        Set full_result to True if you wish the controller
        and the action to be passed on to the page handler
        parameters. By default they won't be.
        """
        import routes
        self.full_result = full_result
        self.controllers = {}
        self.mapper = routes.Mapper()
        # Let Routes discover controller names from our registry.
        self.mapper.controller_scan = self.controllers.keys
    def connect(self, name, route, controller, **kwargs):
        # Register the controller under `name` and map the route to it.
        self.controllers[name] = controller
        self.mapper.connect(name, route, controller=name, **kwargs)
    def redirect(self, url):
        # Used by Routes as its redirect callback.
        raise cherrypy.HTTPRedirect(url)
    def __call__(self, path_info):
        """Set handler and config for the current request."""
        func = self.find_handler(path_info)
        if func:
            cherrypy.serving.request.handler = LateParamPageHandler(func)
        else:
            cherrypy.serving.request.handler = cherrypy.NotFound()
    def find_handler(self, path_info):
        """Find the right page handler, and set request.config."""
        import routes
        request = cherrypy.serving.request
        # Populate the thread-local Routes request config.
        config = routes.request_config()
        config.mapper = self.mapper
        if hasattr(request, 'wsgi_environ'):
            config.environ = request.wsgi_environ
        config.host = request.headers.get('Host', None)
        config.protocol = request.scheme
        config.redirect = self.redirect
        result = self.mapper.match(path_info)
        config.mapper_dict = result
        params = {}
        if result:
            params = result.copy()
        if not self.full_result:
            # Keep routing bookkeeping out of the handler's kwargs.
            params.pop('controller', None)
            params.pop('action', None)
        request.params.update(params)
        # Get config for the root object/path.
        request.config = base = cherrypy.config.copy()
        curpath = ""
        def merge(nodeconf):
            # Fold nodeconf into the request config, fixing up the
            # staticdir section to the current path as we go.
            if 'tools.staticdir.dir' in nodeconf:
                nodeconf['tools.staticdir.section'] = curpath or "/"
            base.update(nodeconf)
        app = request.app
        root = app.root
        if hasattr(root, "_cp_config"):
            merge(root._cp_config)
        if "/" in app.config:
            merge(app.config["/"])
        # Mix in values from app.config.
        atoms = [x for x in path_info.split("/") if x]
        if atoms:
            last = atoms.pop()
        else:
            last = None
        for atom in atoms:
            curpath = "/".join((curpath, atom))
            if curpath in app.config:
                merge(app.config[curpath])
        handler = None
        if result:
            controller = result.get('controller')
            # Resolve a registered name; otherwise use the value as-is.
            controller = self.controllers.get(controller, controller)
            if controller:
                if isinstance(controller, classtype):
                    # Instantiate controller classes per request.
                    controller = controller()
                # Get config from the controller.
                if hasattr(controller, "_cp_config"):
                    merge(controller._cp_config)
                action = result.get('action')
                if action is not None:
                    handler = getattr(controller, action, None)
                    # Get config from the handler
                    if hasattr(handler, "_cp_config"):
                        merge(handler._cp_config)
                else:
                    handler = controller
        # Do the last path atom here so it can
        # override the controller's _cp_config.
        if last:
            curpath = "/".join((curpath, last))
            if curpath in app.config:
                merge(app.config[curpath])
        return handler
def XMLRPCDispatcher(next_dispatcher=Dispatcher()):
    """Chain dispatcher that rewrites XML-RPC paths before dispatching.

    NOTE: the default Dispatcher() is deliberately created once at import
    time and shared; it holds no per-request state.
    """
    from cherrypy.lib import xmlrpcutil
    def xmlrpc_dispatch(path_info):
        # Translate the XML-RPC method name into a handler path.
        path_info = xmlrpcutil.patched_path(path_info)
        return next_dispatcher(path_info)
    return xmlrpc_dispatch
def VirtualHost(next_dispatcher=Dispatcher(), use_x_forwarded_host=True, **domains):
    """
    Select a different handler based on the Host header.
    This can be useful when running multiple sites within one CP server.
    It allows several domains to point to different parts of a single
    website structure. For example::
        http://www.domain.example  -> root
        http://www.domain2.example -> root/domain2/
        http://www.domain2.example:443 -> root/secure
    can be accomplished via the following config::
        [/]
        request.dispatch = cherrypy.dispatch.VirtualHost(
            **{'www.domain2.example': '/domain2',
               'www.domain2.example:443': '/secure',
              })
    next_dispatcher
        The next dispatcher object in the dispatch chain.
        The VirtualHost dispatcher adds a prefix to the URL and calls
        another dispatcher. Defaults to cherrypy.dispatch.Dispatcher().
    use_x_forwarded_host
        If True (the default), any "X-Forwarded-Host"
        request header will be used instead of the "Host" header. This
        is commonly added by HTTP servers (such as Apache) when proxying.
    ``**domains``
        A dict of {host header value: virtual prefix} pairs.
        The incoming "Host" request header is looked up in this dict,
        and, if a match is found, the corresponding "virtual prefix"
        value will be prepended to the URL path before calling the
        next dispatcher. Note that you often need separate entries
        for "example.com" and "www.example.com". In addition, "Host"
        headers may contain the port number.
    """
    from cherrypy.lib import httputil
    def vhost_dispatch(path_info):
        request = cherrypy.serving.request
        header = request.headers.get
        # Resolve the effective domain, preferring X-Forwarded-Host
        # when behind a proxy.
        domain = header('Host', '')
        if use_x_forwarded_host:
            domain = header("X-Forwarded-Host", domain)
        prefix = domains.get(domain, "")
        if prefix:
            path_info = httputil.urljoin(prefix, path_info)
        result = next_dispatcher(path_info)
        # Touch up staticdir config. See http://www.cherrypy.org/ticket/614.
        section = request.config.get('tools.staticdir.section')
        if section:
            # Strip the virtual prefix back off the staticdir section.
            section = section[len(prefix):]
            request.config['tools.staticdir.section'] = section
        return result
    return vhost_dispatch
|
brunocanning/release | refs/heads/master | src/annotations2html/bin/build-doc.py | 5 | #!/usr/bin/env python2
import os
import re
import sys
import errno
import tempfile
from subprocess import Popen, PIPE
import shutil
import time
from time import ctime
import argparse
import logging
from collections import defaultdict
from operator import itemgetter
from lxml import etree
from mako.lookup import TemplateLookup
from mako import exceptions
# Locate our private python library directory relative to this script.
BINDIR = os.path.dirname(os.path.realpath(sys.argv[0]))
sys.path.append(os.path.join(BINDIR, "..", "lib", "python"))
log = logging.getLogger("annotations2html")
# Matches a pan "template <name>;" declaration, allowing the optional
# object/structure/declaration/unique modifiers; group 1 is the name.
template_re = re.compile(r"^\s*(?:(?:object|structure|declaration|unique)\s+)*"
                         r"template\s+(\S+)\s*;")
# Name of the panc annotation-extraction executable invoked via Popen.
panc_annotations = "panc-annotations"
# TEMPLATES_BYNAME is an index keyed by template name
# (the template name may be ambiguous due to LOADPATH)
TEMPLATES_BYNAME = defaultdict(list)
# create some namespaced tagnames
ns = "http://quattor.org/pan/annotations"
tags = dict()
for t in ["desc", "section"]:
    tags[t] = "{%s}%s" % (ns, t)
def is_template(name):
    """Return True if *name* looks like a Pan template file (.pan or .tpl)."""
    # str.endswith accepts a tuple of suffixes, so a single call covers
    # both extensions; no need for the if/return True/return False dance.
    return name.endswith((".pan", ".tpl"))
def parse_panc_errors(text):
    """Parse panc output into {filename: [[error, lines, output], ...]}.

    Each value entry holds the error message, the "line[.col][-line[.col]]"
    range string, and the list of detail lines that followed the error
    header.  Filenames (absolute in panc output) are normalized relative
    to the current working directory.
    """
    ret = defaultdict(list)
    filename = None
    errortxt = None
    lines = None
    output = list()
    # (The original also compiled an unused `relativeize` regex here;
    # removed, since normalization is done with os.path.relpath below.)
    error = re.compile(r'(?P<err>[\w\s]+) \[(?P<file>.*):(?P<lines>[0-9.-]+)\]$')
    for line in text.split("\n"):
        m = error.match(line)
        if m:
            # A new error header: flush the one we were accumulating.
            if filename is not None:
                ret[filename].append([errortxt, lines, output])
            parts = m.groupdict()
            filename = parts["file"]
            # filename emitted by panc is a full absolute pathname;
            # we want to work out a normalized name.
            filename = os.path.relpath(filename, os.getcwd())
            errortxt = parts["err"]
            lines = parts["lines"]
            output = list()
        else:
            # Detail line belonging to the current error (if any).
            output.append(line)
    # Flush the final accumulated error.
    if filename is not None:
        ret[filename].append([errortxt, lines, output])
    return ret
def render_one(outfile, transform, args):
try:
result = transform.render(**args)
except:
print exceptions.text_error_template().render()
log.debug("writing %s", outfile)
with open(outfile, 'w') as fd:
fd.write(result)
def annotate(templates, index, state, base_path, outdir, render_at_end):
    """Run panc-annotations over `templates` (paths relative to base_path),
    parse the XML it emits, and record/render one HTML page per template.

    Mutates `index` (navigation index), `state` (per-directory section
    names) and the module-level TEMPLATES_BYNAME.  Returns a list of
    deferred (outfile, transform, context) render jobs when render_at_end
    is true, otherwise renders immediately and returns an empty list.
    """
    if not templates:
        return
    # Get a temporary directory. panc annotations
    # will be dumped into directory names corresponding
    # to the template namespace, so we need a small tree
    tmpdir = tempfile.mkdtemp()
    try:
        os.makedirs(os.path.join(tmpdir, base_path))
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            pass
        else:
            raise
    deferred = []
    try:
        # Run "panc" to get the annotation data
        args = [panc_annotations, "--output-dir", tmpdir]
        args.extend([os.path.join(base_path, tpl) for tpl in templates])
        log.info("invoking %s", " ".join(args))
        # Make sure output from pan is not mixed with output from this script
        sys.stdout.flush()
        p = Popen(args, stderr=PIPE, stdout=PIPE)
        out, err = p.communicate()
        log.info("command returned %d", p.returncode)
        log.debug("command stdout:\n%s", out)
        log.debug("command stderr:\n%s", err)
        errors = parse_panc_errors(out)
        important = "documentation.annotation.xml"
        # And now parse all the annotations that we received
        for root, dirs, files in os.walk(tmpdir, topdown=True):
            files.sort()
            try:
                # Try and move the important file to the front,
                # but it may not exist. If we can't never mind -
                # just process the sorted list.
                files.remove(important)
                files.insert(0, important)
            except:
                pass
            relpath = os.path.relpath(root, tmpdir)
            if relpath not in state:
                # Inherit the section state from the parent directory.
                state[relpath] = state[os.path.dirname(relpath)].copy()
            for file in files:
                if not file.endswith(".xml"):
                    continue
                log.debug("parsing %s/%s", relpath, file)
                # Create a filename to dump the HTML output
                htmlname = os.path.join(relpath, file)
                htmlname = re.sub(r'\.xml$', '.html', htmlname)
                htmlname = re.sub(r'/', '.', htmlname)
                outfile = os.path.join(outdir, htmlname)
                xml = etree.parse(os.path.join(root, file))
                # Work out what section we're in - we allow
                # the input templates to modify this (so that you
                # can have a documentation.tpl in a directory
                # that sets the section name for all subsequent
                # templates)
                section = xml.findtext("{%s}section" % ns)
                if section is not None:
                    log.info("found section %s for path %s", section, relpath)
                    state[relpath]['section'] = section
                section = state[relpath]['section']
                # Generate some text representing the template itself
                source = file[0:-len(".annotation.xml")]
                tplname = os.path.join(relpath, source)
                myerrors = None
                errname = os.path.join(relpath, file)
                errname = errname[0:-len(".annotation.xml")]
                if errname in errors:
                    myerrors = errors[errname]
                mtime = ctime(os.stat(tplname).st_mtime)
                sourcefd = open(tplname, "r")
                tplsource = fix_unicode(sourcefd.read())
                tnode = xml.getroot()
                # The source-range will provide the pointer to the template
                if tnode is not None and 'source-range' in tnode.attrib:
                    srange = tnode.attrib['source-range']
                    lines = tplsource.split("\n")
                    (start, end) = srange.split('-', 2)
                    (sline, schar) = start.split('.', 2)
                    (eline, echar) = end.split('.', 2)
                    declaration = "\n".join(lines[int(sline) - 1:int(eline)])
                    m = re.search(r'template ([^;]*)', declaration)
                    if m:
                        # We modify the template name to allow for loadpath
                        tplname = m.group(1)
                if base_path != "":
                    entry_title = "%s (%s)" % (tplname, base_path)
                else:
                    entry_title = "%s (top level)" % tplname
                TEMPLATES_BYNAME[tplname].append((htmlname, entry_title))
                if len(TEMPLATES_BYNAME[tplname]) > 1:
                    TEMPLATES_BYNAME[tplname].sort()
                title = xml.findtext("{%s}title" % ns)
                if title is None:
                    title = tplname
                content = None
                # Indexing
                for fn in xml.findall("{%s}function" % ns):
                    index_add(index, "functions", "%s()" % fn.attrib['name'],
                              htmlname, entry_title)
                    content = True
                for var in xml.findall("{%s}variable" % ns):
                    index_add(index, "global variables", var.attrib['name'],
                              htmlname, entry_title)
                    content = True
                index_add(index, section, title, htmlname, entry_title)
                if section not in transformers:
                    transformers[section] = get_transform(section + "_template")
                transform = transformers[section]
                # This is incredibly memory hungry, but we're going to
                # just stash things in memory and then render at the end,
                # allowing files to contain cross-referencing indexes.
                # if memory usage gets too bad, then we can drop the
                # cross-referencing within specific files and render as we
                # go...
                context = {'title': title,
                           'tplname': tplname,
                           'htmlname': htmlname,
                           'navigation': index,
                           'source': tplsource,
                           'errors': myerrors,
                           'section': section,
                           'mtime': mtime,
                           'xml': xml,
                           'ns': ns,
                           'state': state[relpath],
                           'index': TEMPLATES_BYNAME}
                if render_at_end:
                    deferred.append((outfile, transform, context))
                else:
                    render_one(outfile, transform, context)
    finally:
        # Always discard the temporary annotation tree.
        shutil.rmtree(tmpdir)
    return deferred
def fix_unicode(value):
    """ Remove unusable characters from unreliable data sources """
    if value is None:
        return value
    try:
        # Pure-ASCII data passes through untouched.
        unicode(value, "ascii")
    except UnicodeError:
        # Otherwise assume UTF-8 and replace undecodable bytes.
        return unicode(value, "utf-8", "replace")
    else:
        return value
def index_add(index, section, title, href, text):
    """Record an (href, text) link under index[section][title].

    Creates the nested section/title containers on demand; when a title
    accumulates more than one link, the links are kept sorted by href.
    """
    entries = index.setdefault(section, dict()).setdefault(title, [])
    entries.append((href, text))
    if len(entries) > 1:
        entries.sort(key=itemgetter(0))
def build_toplevels(out, index):
    """Write one <section>.html index page per section in `index`."""
    log.info("building index files")
    for section in index.keys():
        log.info("building %s", section)
        idxbody = []
        # Build a human-readable, pluralized page title.
        s = section.capitalize()
        if not s.endswith('s'):
            if s.endswith('y'):
                s = "%sies" % s[:-1]
            else:
                s = "%ss" % s
        if s == "Unclassifieds":
            s = "Unclassified Templates"
        if section in index:
            titles = index[section].keys()
            titles.sort(key=lambda x: x.lower())
            for title in titles:
                # Each title may map to several pages (loadpath ambiguity).
                hreflist = ",<br>".join("<a href='%s'>%s</a>" % (href, text)
                                        for (href, text) in index[section][title])
                if len(index[section][title]) == 1 and \
                        index[section][title][0][1] == title:
                    # Single unambiguous entry: the link text is the title.
                    idxbody.append("<dt>%s</dt><dd></dd>" % hreflist)
                else:
                    idxbody.append("<dt>%s</dt><dd>%s</dd>" % (title, hreflist))
        # Sanitize the section name into a safe filename.
        safe = section
        for x in [" ", "/", "\\"]:
            safe = safe.replace(x, "_")
        transform = get_transform(safe, default="toplevel")
        context = {'body': idxbody,
                   'navigation': index,
                   'section': section,
                   'mtime': time.asctime(),
                   'title': s}
        outfile = os.path.join(out, "%s.html" % safe)
        result = None
        result = transform.render(**context)
        log.debug("writing %s", outfile)
        with open(outfile, 'w') as fd:
            fd.write(result)
def get_transform(section, default='unclassified_template'):
    """Return the mako template for `section`, falling back to `default`
    when no <section>.mako file exists in the templates directory."""
    if section.find("/") >= 0:
        raise Exception("Sections cannot be hierarchial")
    f = os.path.join(BINDIR, "..", "lib", "templates", "%s.mako" % section)
    if not os.path.exists(f):
        section = default
    lookup = TemplateLookup([os.path.join(BINDIR, "..", "lib", "templates")],
                            output_encoding='utf-8', encoding_errors='replace')
    tpl = lookup.get_template("%s.mako" % section)
    return tpl
# Cache of section -> mako template, seeded with the fallback renderer.
transformers = dict()
transformers["unclassified"] = get_transform("unclassified")
def detect_template_basedir(top, filename):
    """Split a template path into (loadpath base dir, namespaced name).

    Reads the file's "template <name>;" declaration to decide how much
    of the directory structure is loadpath prefix versus namespace.
    """
    # If top = "/path", filename = "/path/a/b/c.tpl", and it starts with
    # "template b/c;", then we want to return (a, b/c.tpl)
    template_name = None
    relpath, ext = os.path.splitext(os.path.relpath(filename, top))
    with open(filename, "r") as fd:
        # Scan for the first "template <name>;" declaration.
        for line in fd:
            res = template_re.match(line)
            if not res:
                continue
            template_name = res.group(1)
            break
    if template_name and (relpath == template_name or
                          relpath.endswith("/" + template_name)):
        if relpath == template_name:
            # Declared name covers the whole relative path: top-level.
            return ".", template_name + ext
        else:
            # Strip the namespaced name (and its slash) off the end to
            # recover the loadpath base directory.
            return relpath[:-len(template_name) - 1], template_name + ext
    else:
        # Either we failed to parse the template, or the template name was not
        # correct. The safe choice is to parse the template alone (well, at most
        # together with other templates in the same directory), and let panc
        # sort it out.
        return os.path.dirname(relpath), os.path.basename(relpath) + ext
def chunk_list(list_, chunk_size):
    """Yield successive chunk_size-sized slices of list_ (last may be short)."""
    # range instead of xrange: identical behavior for this iteration on
    # Python 2, and also valid on Python 3 where xrange no longer exists.
    for i in range(0, len(list_), chunk_size):
        yield list_[i:i + chunk_size]
# Usage: %prog [options] <SOURCE> <OUTPUT>
# e.g. to read templates starting at /my/templates
# and to write them out into /var/htdocs/templates, you would
# %prog /my/templates /var/htdocs/templates
#
# An index.html file, and a set of .html files corresponding
# to template names will be created. You should have a
# css file called "annotations.css" in the output directory to
# get useful HTML viewing.
# This processes single directories at a time. Which incurs
# a hefty penalty in firing off java each time, however we
# can't really do this "in aggregate" across all directories
# because the annotation output filename is based on the
# template name, and that is not unique (there may be multiple
# template names the same in different loadpath contexts)
def main():
    """Drive the documentation build: walk the source tree, group templates
    by their loadpath base directory, run panc over them in chunks, and
    render the HTML pages plus the per-section index pages."""
    parser = argparse.ArgumentParser(description="Generate template documentation")
    # default=0 so the numeric comparisons below never see None
    # (comparing None with an int only "works" on Python 2).
    parser.add_argument("-d", "--debug", dest="debug", action="count", default=0,
                        help="Extra output. Specify twice for even more")
    parser.add_argument("-j", "--java", dest="java", metavar='JAVA_HOME',
                        help="Location of the JRE")
    parser.add_argument("--norender_at_end", dest="render_at_end",
                        action="store_false", default=True,
                        help="Render immediately. Reduces memory usage, but "
                        "breaks cross referencing")
    parser.add_argument("source", help="Directory where the templates can be found")
    parser.add_argument("output", help="Output directory")
    args = parser.parse_args()
    if args.java is not None:
        os.environ["JAVA_HOME"] = args.java
    if args.debug > 1:
        logging.basicConfig(level=logging.DEBUG)
    elif args.debug == 1:
        logging.basicConfig(level=logging.INFO)
    else:
        logging.basicConfig(level=logging.WARNING)
    top = args.source
    # If the chdir fails, the exception is just what we want.
    os.chdir(top)
    out = args.output
    if not os.path.exists(out):
        os.makedirs(out)
    # Always copy across the latest CSS
    css = os.path.join(BINDIR, "..", "lib", "annotations.css")
    shutil.copyfile(css, os.path.join(out, "annotations.css"))
    log.warning("starting %s %s %s", sys.argv[0], top, out)
    index = dict()
    index['contents'] = dict()
    state = dict()
    state[""] = dict({'section': 'unclassified'})
    work_queue = []
    template_bases = defaultdict(list)
    for root, dirs, files in os.walk(top, topdown=True):
        # Make sure we don't descend into mgmt directories (e.g. .git)
        for i in range(len(dirs) - 1, -1, -1):
            if dirs[i].startswith("."):
                del dirs[i]
            # elif, not if: the original re-indexed dirs[i] after a
            # possible deletion above, raising IndexError when the last
            # entry was removed.  (A dotted name can never equal 't'.)
            elif root == top and dirs[i] == 't':
                # 't' is considered to be a "test" directory - ignore it
                del dirs[i]
        for f in files:
            if not is_template(f):
                continue
            basedir, tplpath = detect_template_basedir(top, os.path.join(root, f))
            template_bases[basedir].append(tplpath)
    for relpath in sorted(template_bases.keys()):
        log.info("scanning templates relative to %s", relpath)
        tpls = template_bases[relpath]
        if relpath == ".":
            root = top
        else:
            root = os.path.join(top, relpath)
        # Chunk the work to keep panc's command line a manageable size.
        for chunk in chunk_list(tpls, 1000):
            deferred = annotate(chunk, index, state, relpath, out,
                                args.render_at_end)
            if deferred:
                work_queue.extend(deferred)
    log.info("rendering html")
    # Loop variable renamed from `args` so the parsed argparse namespace
    # is not clobbered.
    for (outfile, transform, render_args) in work_queue:
        render_one(outfile, transform, render_args)
    build_toplevels(out, index)
if __name__ == '__main__':
main()
|
SiennaStellar/linux-3.10.20_kelleni | refs/heads/master | tools/perf/scripts/python/net_dropmonitor.py | 2669 | # Monitor the system for dropped packets and proudce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
	"""Populate the global kallsyms list with (address, name) tuples
	parsed from /proc/kallsyms, sorted by address for binary search."""
	global kallsyms
	try:
		f = open("/proc/kallsyms", "r")
	except IOError:
		# Catch only the expected open failure (missing file or no
		# permission) instead of a bare except that hides real bugs;
		# symbol lookup then degrades to printing raw addresses.
		return
	try:
		for line in f:
			fields = line.split()
			# field 0 is the hex address, field 2 the symbol name
			kallsyms.append((int(fields[0], 16), fields[2]))
	finally:
		# The original never closed the file; always release it.
		f.close()
	kallsyms.sort()
def get_sym(sloc):
	"""Map an address (string) to (symbol name, offset into symbol).

	Binary-searches the sorted global kallsyms table for the greatest
	symbol address <= loc; returns (None, 0) when loc precedes every
	known symbol (or the table is empty).
	"""
	loc = int(sloc)
	# Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
	#            kallsyms[i][0] > loc for all end <= i < len(kallsyms)
	start, end = -1, len(kallsyms)
	while end != start + 1:
		pivot = (start + end) // 2
		if loc < kallsyms[pivot][0]:
			end = pivot
		else:
			start = pivot
	# Now (start == -1 or kallsyms[start][0] <= loc)
	# and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
	if start >= 0:
		symloc, name = kallsyms[start]
		return (name, loc - symloc)
	else:
		return (None, 0)
def print_drop_table():
	# Emit one row per drop location: resolved symbol name (or the raw
	# address string when resolution failed), offset into the symbol,
	# and the number of drops counted at that location.
	print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
	for i in drop_log.keys():
		(sym, off) = get_sym(i)
		if sym == None:
			# Unresolvable address: fall back to the raw key.
			sym = i
		print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
	# perf hook: called once before event processing starts.
	print "Starting trace (Ctrl-C to dump results)"
def trace_end():
	# perf hook: called once after the trace ends.  The symbol table is
	# only loaded here, lazily, right before the report is printed.
	print "Gathering kallsyms data"
	get_kallsyms_table()
	print_drop_table()
# called from perf, when it finds a correspoinding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
		   skbaddr, location, protocol):
	"""perf callback for the skb:kfree_skb tracepoint.

	Counts drops per return address ("location").  Keys are stringified
	so they can double as the fallback label in print_drop_table().
	"""
	slocation = str(location)
	# dict.get() replaces the original bare try/except counter bump,
	# which would also have hidden unrelated errors.
	drop_log[slocation] = drop_log.get(slocation, 0) + 1
|
anasazi/POP-REU-Project | refs/heads/master | pkgs/libs/libxml2/src/python/tests/reader2.py | 87 | #!/usr/bin/python -u
#
# this tests the DTD validation with the XmlTextReader interface
#
import sys
import glob
import string
import StringIO
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
err=""
expect="""../../test/valid/rss.xml:177: element rss: validity error : Element rss does not carry attribute version
</rss>
^
../../test/valid/xlink.xml:450: element termdef: validity error : ID dt-arc already defined
<p><termdef id="dt-arc" term="Arc">An <ter
^
../../test/valid/xlink.xml:530: validity error : attribute def line 199 references an unknown ID "dt-xlg"
^
"""
def callback(ctx, str):
    # libxml2 error handler: accumulate every reported message into the
    # global buffer so it can be compared against the expected output.
    global err
    err += "%s" % (str,)
# Route libxml2 error output through callback() so validity errors are
# captured in `err` instead of being printed directly.
libxml2.registerErrorHandler(callback, "")
valid_files = glob.glob("../../test/valid/*.x*")
valid_files.sort()
for file in valid_files:
    # Skip the t8* test documents.
    if string.find(file, "t8") != -1:
        continue
    reader = libxml2.newTextReaderFilename(file)
    #print "%s:" % (file)
    reader.SetParserProp(libxml2.PARSER_VALIDATE, 1)
    # Drive the reader to the end; Read() returns 1 while nodes remain,
    # 0 at end of document, and -1 on error.
    ret = reader.Read()
    while ret == 1:
        ret = reader.Read()
    if ret != 0:
        print "Error parsing and validating %s" % (file)
        #sys.exit(1)
# All captured validity errors must match the expected transcript.
if err != expect:
    print err
#
# another separate test based on Stephane Bidoul one
#
s = """
<!DOCTYPE test [
<!ELEMENT test (x,b)>
<!ELEMENT x (c)>
<!ELEMENT b (#PCDATA)>
<!ELEMENT c (#PCDATA)>
<!ENTITY x "<x><c>xxx</c></x>">
]>
<test>
&x;
<b>bbb</b>
</test>
"""
expect="""10,test
1,test
14,#text
1,x
1,c
3,#text
15,c
15,x
14,#text
1,b
3,#text
15,b
14,#text
15,test
"""
res=""
err=""
input = libxml2.inputBuffer(StringIO.StringIO(s))
reader = input.newTextReader("test2")
reader.SetParserProp(libxml2.PARSER_LOADDTD,1)
reader.SetParserProp(libxml2.PARSER_DEFAULTATTRS,1)
reader.SetParserProp(libxml2.PARSER_SUBST_ENTITIES,1)
reader.SetParserProp(libxml2.PARSER_VALIDATE,1)
while reader.Read() == 1:
res = res + "%s,%s\n" % (reader.NodeType(),reader.Name())
if res != expect:
print "test2 failed: unexpected output"
print res
sys.exit(1)
if err != "":
print "test2 failed: validation error found"
print err
sys.exit(1)
#
# Another test for external entity parsing and validation
#
s = """<!DOCTYPE test [
<!ELEMENT test (x)>
<!ELEMENT x (#PCDATA)>
<!ENTITY e SYSTEM "tst.ent">
]>
<test>
&e;
</test>
"""
tst_ent = """<x>hello</x>"""
expect="""10 test
1 test
14 #text
1 x
3 #text
15 x
14 #text
15 test
"""
res=""
def myResolver(URL, ID, ctxt):
    # Entity-loader hook: serve the in-memory "tst.ent" document so the
    # external-entity test needs no file on disk; returning None defers
    # every other URL to the default loader.
    if URL == "tst.ent":
        return(StringIO.StringIO(tst_ent))
    return None
libxml2.setEntityLoader(myResolver)
input = libxml2.inputBuffer(StringIO.StringIO(s))
reader = input.newTextReader("test3")
reader.SetParserProp(libxml2.PARSER_LOADDTD,1)
reader.SetParserProp(libxml2.PARSER_DEFAULTATTRS,1)
reader.SetParserProp(libxml2.PARSER_SUBST_ENTITIES,1)
reader.SetParserProp(libxml2.PARSER_VALIDATE,1)
while reader.Read() == 1:
res = res + "%s %s\n" % (reader.NodeType(),reader.Name())
if res != expect:
print "test3 failed: unexpected output"
print res
sys.exit(1)
if err != "":
print "test3 failed: validation error found"
print err
sys.exit(1)
#
# Another test for recursive entity parsing, validation, and replacement of
# entities, making sure the entity ref node doesn't show up in that case
#
s = """<!DOCTYPE test [
<!ELEMENT test (x, x)>
<!ELEMENT x (y)>
<!ELEMENT y (#PCDATA)>
<!ENTITY x "<x>&y;</x>">
<!ENTITY y "<y>yyy</y>">
]>
<test>
&x;
&x;
</test>"""
expect="""10 test 0
1 test 0
14 #text 1
1 x 1
1 y 2
3 #text 3
15 y 2
15 x 1
14 #text 1
1 x 1
1 y 2
3 #text 3
15 y 2
15 x 1
14 #text 1
15 test 0
"""
res=""
err=""
input = libxml2.inputBuffer(StringIO.StringIO(s))
reader = input.newTextReader("test4")
reader.SetParserProp(libxml2.PARSER_LOADDTD,1)
reader.SetParserProp(libxml2.PARSER_DEFAULTATTRS,1)
reader.SetParserProp(libxml2.PARSER_SUBST_ENTITIES,1)
reader.SetParserProp(libxml2.PARSER_VALIDATE,1)
while reader.Read() == 1:
res = res + "%s %s %d\n" % (reader.NodeType(),reader.Name(),reader.Depth())
if res != expect:
print "test4 failed: unexpected output"
print res
sys.exit(1)
if err != "":
print "test4 failed: validation error found"
print err
sys.exit(1)
#
# The same test but without entity substitution this time
#
s = """<!DOCTYPE test [
<!ELEMENT test (x, x)>
<!ELEMENT x (y)>
<!ELEMENT y (#PCDATA)>
<!ENTITY x "<x>&y;</x>">
<!ENTITY y "<y>yyy</y>">
]>
<test>
&x;
&x;
</test>"""
expect="""10 test 0
1 test 0
14 #text 1
5 x 1
14 #text 1
5 x 1
14 #text 1
15 test 0
"""
res=""
err=""
input = libxml2.inputBuffer(StringIO.StringIO(s))
reader = input.newTextReader("test5")
reader.SetParserProp(libxml2.PARSER_VALIDATE,1)
while reader.Read() == 1:
res = res + "%s %s %d\n" % (reader.NodeType(),reader.Name(),reader.Depth())
if res != expect:
print "test5 failed: unexpected output"
print res
if err != "":
print "test5 failed: validation error found"
print err
#
# cleanup: drop the remaining reader/buffer references so their memory is
# released before the leak check below.
#
del input
del reader
# Memory debug specific: debugMemory(1) reports outstanding allocations;
# a non-zero value means some object from the tests above leaked.
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
    print "OK"
else:
    print "Memory leak %d bytes" % (libxml2.debugMemory(1))
    libxml2.dumpMemory()
|
Jannes123/inasafe | refs/heads/develop | safe/impact_functions/generic/classified_polygon_building/test/__init__.py | 229 | __author__ = 'akbar'
|
ralphbean/raptorizemw | refs/heads/master | examples/tg2-raptorized/tg2raptorized/config/environment.py | 1 | # -*- coding: utf-8 -*-
"""WSGI environment setup for tg2-raptorized."""
from tg2raptorized.config.app_cfg import base_config
__all__ = ['load_environment']
#Use base_config to setup the environment loader function
load_environment = base_config.make_load_environment()
|
yongtang/tensorflow | refs/heads/master | tensorflow/tools/docs/base_dir.py | 5 | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Opensource base_dir configuration for tensorflow doc-generator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import distutils
from os import path
import keras_preprocessing
import tensorboard
import tensorflow as tf
import tensorflow_estimator
try:
import keras # pylint: disable=g-import-not-at-top
except ImportError:
pass
def get_base_dirs_and_prefixes(code_url_prefix):
  """Returns the base_dirs and code_prefixes for OSS TensorFlow api gen.

  Args:
    code_url_prefix: Source-link URL prefix for the core `tensorflow`
      package; prefixes for the companion packages are fixed constants.

  Returns:
    A (base_dirs, code_url_prefixes) pair of parallel sequences: the local
    package directories to document and the source-URL prefix for each.
  """
  base_dir = path.dirname(tf.__file__)
  # Parse the version once instead of re-parsing it for every comparison
  # (the original built a LooseVersion twice with duplicated literals).
  # NOTE(review): this relies on the `distutils.version` submodule being
  # reachable as an attribute of `distutils` (only `import distutils`
  # appears above; usually another import pulls the submodule in) -- confirm.
  tf_version = distutils.version.LooseVersion(tf.__version__)
  if tf_version >= "2.6":
    # TF >= 2.6: keras ships as its own package and is documented too.
    # NOTE(review): `keras` is imported in a try/except at module scope;
    # if that import failed, this line raises NameError -- confirm intended.
    base_dirs = [
        base_dir,
        path.dirname(keras.__file__),
        path.dirname(keras_preprocessing.__file__),
        path.dirname(tensorboard.__file__),
        path.dirname(tensorflow_estimator.__file__),
    ]
    code_url_prefixes = (
        code_url_prefix,
        "https://github.com/keras-team/keras/tree/master/keras",
        "https://github.com/keras-team/keras-preprocessing/tree/master/keras_preprocessing",
        "https://github.com/tensorflow/tensorboard/tree/master/tensorboard",
        "https://github.com/tensorflow/estimator/tree/master/tensorflow_estimator",
    )
  else:
    if tf_version >= "2.2":
      base_dirs = [
          base_dir,
          path.dirname(keras_preprocessing.__file__),
          path.dirname(tensorboard.__file__),
          path.dirname(tensorflow_estimator.__file__),
      ]
    else:
      # TF < 2.2 keeps the implementation in the sibling tensorflow_core
      # package, so document that directory instead of `tensorflow`.
      base_dirs = [
          path.normpath(path.join(base_dir, "../tensorflow_core")),
          path.dirname(keras_preprocessing.__file__),
          path.dirname(tensorboard.__file__),
          path.dirname(tensorflow_estimator.__file__),
      ]
    # Pre-2.6 releases share the same prefix tuple (no separate keras).
    code_url_prefixes = (
        code_url_prefix,
        "https://github.com/keras-team/keras-preprocessing/tree/master/keras_preprocessing",
        "https://github.com/tensorflow/tensorboard/tree/master/tensorboard",
        "https://github.com/tensorflow/estimator/tree/master/tensorflow_estimator",
    )
  return base_dirs, code_url_prefixes
|
sabi0/intellij-community | refs/heads/master | python/testData/codeInsight/smartEnter/docstring.py | 83 | """<caret>some text" |
T-002/obd2datacollectorpi | refs/heads/master | logwatcher/logwatcher.py | 1 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#The MIT License (MIT)
#
#Copyright (c) 2013-2014 Christian Schwarz
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from configuration import CONFIGURATION
import threading, time
class LogWatcher(threading.Thread):
    """Background thread that tails the most recent log file in a directory.

    Collected lines are buffered internally and handed out in batches via
    get_current_data().
    """

    def __init__(self, dataPath):
        """Initializes the LogWatcher and starts the watcher thread.

        :param String dataPath: Directory where the data will be stored.
        """
        super(LogWatcher, self).__init__()
        # Expected filename suffix of log files.
        self.logEnding = CONFIGURATION["logending"]
        self._dataPath = dataPath
        self._newestFile = None   # path of the most recent log file found
        self._currentFile = None  # path of the file currently opened
        self._fileHandle = None   # open handle for _currentFile
        self._currentData = []    # lines gathered since the last fetch
        self._lock = threading.Lock()  # guards _currentData
        self._watchLogs = True
        self.start()

    def shutdown(self):
        """Shuts down the LogWatcher and waits for the thread to finish."""
        self._watchLogs = False
        self.join()

    def check_for_new_logfile(self):
        """Checks, if a more current log file can be found.

        TODO(review): not implemented -- ``_newestFile`` is never updated,
        so run() cannot open anything until this is filled in.
        """

    def run(self):
        """Runs the LogWatcher polling loop."""
        # Open the newest log file (py3-compatible open() instead of file()).
        self.check_for_new_logfile()
        self._fileHandle = open(self._newestFile, "rb")
        # BUG FIX: record which file is open; the original left
        # _currentFile as None and crashed on the first comparison branch.
        self._currentFile = self._newestFile
        while self._watchLogs:
            # Sleep for 50ms between polls.
            time.sleep(0.05)
            # Switch over when a newer log file has appeared.
            self.check_for_new_logfile()
            if self._currentFile != self._newestFile:
                # BUG FIX: close the open handle; the original called
                # close() on the path string stored in _currentFile.
                self._fileHandle.close()
                self._fileHandle = open(self._newestFile, "rb")
                self._currentFile = self._newestFile
            # Read all lines that are available right now.
            lines = self._fileHandle.readlines()
            with self._lock:
                # BUG FIX: extend instead of append, so consumers receive a
                # flat list of lines rather than nested per-poll lists.
                self._currentData.extend(lines)

    def get_current_data(self):
        """Returns the log lines collected since the last call.

        :return: A list of raw log lines; the internal buffer is cleared.
        :rtype: List.
        """
        with self._lock:
            result = self._currentData
            self._currentData = []
        # BUG FIX: the original returned self._currentData, which had just
        # been reset, so callers always received an empty list.
        return result
|
jbkkd/django-taggit | refs/heads/master | taggit/views.py | 24 | from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.shortcuts import get_object_or_404
from django.views.generic.list import ListView
from taggit.models import Tag, TaggedItem
def tagged_object_list(request, slug, queryset, **kwargs):
    """List view of the objects in `queryset` carrying the tag `slug`.

    `queryset` may be a callable returning a queryset (resolved here so
    URLconfs can defer evaluation).  Raises Http404 when no Tag with the
    given slug exists.  The matched Tag is exposed to the template as
    ``extra_context['tag']``.
    """
    if callable(queryset):
        queryset = queryset()
    tag = get_object_or_404(Tag, slug=slug)
    # Restrict to objects of this queryset's model that are tagged with
    # `tag`, matching on the generic (content_type, object_id) pair.
    qs = queryset.filter(pk__in=TaggedItem.objects.filter(
        tag=tag, content_type=ContentType.objects.get_for_model(queryset.model)
    ).values_list("object_id", flat=True))
    if "extra_context" not in kwargs:
        kwargs["extra_context"] = {}
    kwargs["extra_context"]["tag"] = tag
    # NOTE(review): ListView.as_view() builds a view callable from class
    # attributes; it does not accept (request, queryset) positionally, so
    # this call looks broken -- confirm against the Django version in use.
    return ListView.as_view(request, qs, **kwargs)
|
2013Commons/HUE-SHARK | refs/heads/master | desktop/core/ext-py/Pygments-1.3.1/build/lib.linux-i686-2.7/pygments/filter.py | 75 | # -*- coding: utf-8 -*-
"""
pygments.filter
~~~~~~~~~~~~~~~
Module that implements the default filter.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
def apply_filters(stream, filters, lexer=None):
    """
    Run *stream* through every filter in *filters*, in order.

    The *lexer* (or ``None`` when not supplied) is handed to each
    filter's ``filter()`` method.  The result is a lazily-evaluated
    token stream.
    """
    def chained(flt, tokens):
        # Wrap one filter stage as a generator over the previous stage.
        for item in flt.filter(lexer, tokens):
            yield item

    current = stream
    for flt in filters:
        current = chained(flt, current)
    return current
def simplefilter(f):
    """
    Decorator that converts a function into a filter::

        @simplefilter
        def lowercase(lexer, stream, options):
            for ttype, value in stream:
                yield ttype, value.lower()
    """
    # Build a throwaway FunctionFilter subclass whose `function` class
    # attribute is the decorated callable; metadata is carried over so
    # introspection still points at the original function.
    namespace = {
        'function': f,
        '__module__': getattr(f, '__module__'),
        '__doc__': f.__doc__,
    }
    return type(f.__name__, (FunctionFilter,), namespace)
class Filter(object):
    """
    Base class for all stream filters.  Subclass it directly or use the
    `simplefilter` decorator to create your own filters.
    """

    def __init__(self, **opts):
        # Keep the raw keyword options around for subclasses to inspect.
        self.options = opts

    def filter(self, lexer, stream):
        # Subclasses must yield (ttype, value) pairs.
        raise NotImplementedError()
class FunctionFilter(Filter):
    """
    Abstract class used by `simplefilter` to create simple
    function filters on the fly. The `simplefilter` decorator
    automatically creates subclasses of this class for
    functions passed to it.
    """
    # Overridden with the wrapped callable by simplefilter-made subclasses.
    function = None

    def __init__(self, **options):
        # BUG FIX: the original checked `not hasattr(self, 'function')`,
        # which is always False because the class attribute above exists,
        # so the guard could never fire.  Check the value instead.
        if self.function is None:
            raise TypeError('%r used without bound function' %
                            self.__class__.__name__)
        Filter.__init__(self, **options)

    def filter(self, lexer, stream):
        # pylint: disable-msg=E1102
        for ttype, value in self.function(lexer, stream, self.options):
            yield ttype, value
|
emrehayirci/deps | refs/heads/master | posts/urls.py | 1 | from django.conf.urls import url
from .views import details, index, create_post, create_comment, remove_comment
urlpatterns = [
    # NOTE(review): every id pattern is a single [0-9] digit, so objects
    # with ids >= 10 are unreachable -- presumably should be [0-9]+.
    url(r'^(?P<post_id>[0-9])/comments/(?P<comment_id>[0-9])/$', remove_comment, name='remove-comment'),
    url(r'^(?P<post_id>[0-9])/comments/$', create_comment, name='create-comment'),
    url(r'^(?P<post_id>[0-9])/$', details, name='details-post'),
    # NOTE(review): 'crete-post' looks like a typo for 'create-post', but
    # the name is a runtime reverse() key -- renaming would break callers.
    url(r'^new/$', create_post, name='crete-post'),
    url(r'^$', index, name='home'),
]
|
googleads/googleads-python-lib | refs/heads/master | googleads/ad_manager.py | 1 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client library for the Ad Manager API."""
import csv
import datetime
import logging
import numbers
import os
import sys
import time
from urllib.request import build_opener
import pytz
import googleads.common
import googleads.errors
# The default application name.
DEFAULT_APPLICATION_NAME = 'INSERT_APPLICATION_NAME_HERE'
# The endpoint server for Ad Manager.
DEFAULT_ENDPOINT = 'https://ads.google.com'
# The suggested page limit per page fetched from the API.
SUGGESTED_PAGE_LIMIT = 500
# The chunk size used for report downloads.
_CHUNK_SIZE = 16 * 1024
_data_downloader_logger = logging.getLogger(
'%s.%s' % (__name__, 'data_downloader'))
# A giant dictionary of Ad Manager versions and the services they support.
_SERVICE_MAP = {
'v202008':
('ActivityGroupService', 'ActivityService', 'AdExclusionRuleService',
'AdjustmentService', 'AdRuleService', 'AudienceSegmentService',
'CdnConfigurationService', 'CmsMetadataService', 'CompanyService',
'ContactService', 'ContentBundleService', 'ContentService',
'CreativeReviewService', 'CreativeService', 'CreativeSetService',
'CreativeTemplateService', 'CreativeWrapperService',
'CustomFieldService', 'CustomTargetingService',
'DaiAuthenticationKeyService', 'DaiEncodingProfileService',
'ForecastService', 'InventoryService', 'LabelService',
'LineItemCreativeAssociationService', 'LineItemService',
'LineItemTemplateService', 'LiveStreamEventService',
'MobileApplicationService', 'NativeStyleService', 'NetworkService',
'OrderService', 'PlacementService', 'ProposalLineItemService',
'ProposalService', 'PublisherQueryLanguageService', 'ReportService',
'SiteService', 'StreamActivityMonitorService',
'SuggestedAdUnitService', 'TeamService', 'TargetingPresetService',
'UserService', 'UserTeamAssociationService'),
'v202011':
('ActivityGroupService', 'ActivityService', 'AdExclusionRuleService',
'AdjustmentService', 'AdRuleService', 'AudienceSegmentService',
'CdnConfigurationService', 'CmsMetadataService', 'CompanyService',
'ContactService', 'ContentBundleService', 'ContentService',
'CreativeReviewService', 'CreativeService', 'CreativeSetService',
'CreativeTemplateService', 'CreativeWrapperService',
'CustomFieldService', 'CustomTargetingService',
'DaiAuthenticationKeyService', 'DaiEncodingProfileService',
'ForecastService', 'InventoryService', 'LabelService',
'LineItemCreativeAssociationService', 'LineItemService',
'LineItemTemplateService', 'LiveStreamEventService',
'MobileApplicationService', 'NativeStyleService', 'NetworkService',
'OrderService', 'PlacementService', 'ProposalLineItemService',
'ProposalService', 'PublisherQueryLanguageService', 'ReportService',
'SiteService', 'StreamActivityMonitorService',
'SuggestedAdUnitService', 'TeamService', 'TargetingPresetService',
'UserService', 'UserTeamAssociationService'),
'v202102':
('ActivityGroupService', 'ActivityService', 'AdExclusionRuleService',
'AdjustmentService', 'AdRuleService', 'AudienceSegmentService',
'CdnConfigurationService', 'CmsMetadataService', 'CompanyService',
'ContactService', 'ContentBundleService', 'ContentService',
'CreativeReviewService', 'CreativeService', 'CreativeSetService',
'CreativeTemplateService', 'CreativeWrapperService',
'CustomFieldService', 'CustomTargetingService',
'DaiAuthenticationKeyService', 'DaiEncodingProfileService',
'ForecastService', 'InventoryService', 'LabelService',
'LineItemCreativeAssociationService', 'LineItemService',
'LineItemTemplateService', 'LiveStreamEventService',
'MobileApplicationService', 'NativeStyleService', 'NetworkService',
'OrderService', 'PlacementService', 'ProposalLineItemService',
'ProposalService', 'PublisherQueryLanguageService', 'ReportService',
'SiteService', 'StreamActivityMonitorService',
'SuggestedAdUnitService', 'TeamService', 'TargetingPresetService',
'UserService', 'UserTeamAssociationService'),
'v202105':
('ActivityGroupService', 'ActivityService', 'AdExclusionRuleService',
'AdjustmentService', 'AdRuleService', 'AudienceSegmentService',
'CdnConfigurationService', 'CmsMetadataService', 'CompanyService',
'ContactService', 'ContentBundleService', 'ContentService',
'CreativeReviewService', 'CreativeService', 'CreativeSetService',
'CreativeTemplateService', 'CreativeWrapperService',
'CustomFieldService', 'CustomTargetingService',
'DaiAuthenticationKeyService', 'DaiEncodingProfileService',
'ForecastService', 'InventoryService', 'LabelService',
'LineItemCreativeAssociationService', 'LineItemService',
'LineItemTemplateService', 'LiveStreamEventService',
'MobileApplicationService', 'NativeStyleService', 'NetworkService',
'OrderService', 'PlacementService', 'ProposalLineItemService',
'ProposalService', 'PublisherQueryLanguageService', 'ReportService',
'SiteService', 'StreamActivityMonitorService',
'SuggestedAdUnitService', 'TeamService', 'TargetingPresetService',
'UserService', 'UserTeamAssociationService'),
}
class AdManagerClient(googleads.common.CommonClient):
  """A central location to set headers and create web service clients.
  Attributes:
    oauth2_client: A googleads.oauth2.GoogleOAuth2Client used to authorize your
      requests.
    application_name: An arbitrary string which will be used to identify your
      application
    network_code: A string identifying the network code of the network you are
      accessing. All requests other than some NetworkService calls require
      this header to be set.
  """
  # The key in the storage yaml which contains Ad Manager data.
  _YAML_KEY = 'ad_manager'
  # A list of values which must be provided to use Ad Manager.
  _REQUIRED_INIT_VALUES = ('application_name',)
  # A list of values which may optionally be provided when using Ad Manager.
  _OPTIONAL_INIT_VALUES = (
      'network_code', googleads.common.ENABLE_COMPRESSION_KEY)
  # The format of SOAP service WSDLs. A server, version, and service name need
  # to be formatted in.
  _SOAP_SERVICE_FORMAT = '%s/apis/ads/publisher/%s/%s?wsdl'
  @classmethod
  def LoadFromString(cls, yaml_doc):
    """Creates a AdManagerClient with information stored in a yaml string.
    Args:
      yaml_doc: The yaml string containing the cached Ad Manager data.
    Returns:
      A AdManagerClient initialized with the values cached in the yaml string.
    Raises:
      A GoogleAdsValueError if the given yaml string does not contain the
      information necessary to instantiate a client object - either a
      required key was missing or an OAuth2 key was missing.
    """
    return cls(**googleads.common.LoadFromString(
        yaml_doc, cls._YAML_KEY, cls._REQUIRED_INIT_VALUES,
        cls._OPTIONAL_INIT_VALUES))
  @classmethod
  def LoadFromStorage(cls, path=None):
    """Creates a AdManagerClient with information stored in a yaml file.
    Args:
      [optional]
      path: str The path to the file containing cached Ad Manager data.
        Defaults to ~/googleads.yaml when not given.
    Returns:
      A AdManagerClient initialized with the values cached in the file.
    Raises:
      A GoogleAdsValueError if the given yaml file does not contain the
      information necessary to instantiate a client object - either a
      required key was missing or an OAuth2 key was missing.
    """
    if path is None:
      path = os.path.join(os.path.expanduser('~'), 'googleads.yaml')
    return cls(**googleads.common.LoadFromStorage(
        path, cls._YAML_KEY, cls._REQUIRED_INIT_VALUES,
        cls._OPTIONAL_INIT_VALUES))
  def __init__(self, oauth2_client, application_name, network_code=None,
               cache=None, proxy_config=None, timeout=3600,
               custom_http_headers=None,
               enable_compression=False):
    """Initializes a AdManagerClient.
    For more information on these arguments, see our SOAP headers guide:
    https://developers.google.com/ad-manager/docs/soap_xml
    Args:
      oauth2_client: A googleads.oauth2.GoogleOAuth2Client used to authorize
        your requests.
      application_name: An arbitrary string which will be used to identify your
        application
      [optional]
      network_code: A string identifying the network code of the network you are
        accessing. All requests other than getAllNetworks require this header
        to be set.
      cache: A subclass of zeep.cache.Base. If not set,
        this will default to a basic file cache. To disable caching for Zeep,
        pass googleads.common.ZeepServiceProxy.NO_CACHE.
      proxy_config: A googleads.common.ProxyConfig instance or None if a proxy
        isn't being used.
      timeout: An integer timeout in MS for connections made to Ad Manager.
      custom_http_headers: A dictionary with HTTP headers to add to outgoing
        requests.
      enable_compression: A boolean indicating if you want to enable compression
        of the SOAP response. If True, the SOAP response will use gzip
        compression, and will be decompressed for you automatically.
    """
    super(AdManagerClient, self).__init__()
    # Refuse the placeholder application name so requests are attributable.
    if not application_name or (DEFAULT_APPLICATION_NAME in application_name):
      raise googleads.errors.GoogleAdsValueError(
          'Application name must be set and not contain the default [%s]' %
          DEFAULT_APPLICATION_NAME)
    self.oauth2_client = oauth2_client
    self.application_name = application_name
    self.network_code = network_code
    self.cache = cache
    self.custom_http_headers = custom_http_headers
    self._header_handler = _AdManagerHeaderHandler(
        self, enable_compression, custom_http_headers)
    self.proxy_config = (proxy_config if proxy_config
                         else googleads.common.ProxyConfig())
    # Flag gzip in the application name, matching the accept-encoding
    # header added by the header handler.
    if enable_compression:
      self.application_name = '%s (gzip)' % self.application_name
    self.timeout = timeout
  # NOTE: the `version` default is computed once at import time; plain
  # lexicographic sort picks the newest because versions follow vYYYYMM.
  def GetService(self, service_name, version=sorted(_SERVICE_MAP.keys())[-1],
                 server=None):
    """Creates a service client for the given service.
    Args:
      service_name: A string identifying which Ad Manager service to create a
        service client for.
      [optional]
      version: A string identifying the Ad Manager version to connect to. This
        defaults to what is currently the latest version. This will be
        updated in future releases to point to what is then the
        latest version.
      server: A string identifying the webserver hosting the Ad Manager API.
    Returns:
      A googleads.common.GoogleSoapService instance which has the headers
      and proxy configured for use.
    Raises:
      A GoogleAdsValueError if the service or version provided do not exist.
    """
    if not server:
      server = DEFAULT_ENDPOINT
    # Strip a trailing slash so _SOAP_SERVICE_FORMAT yields a clean URL.
    server = server[:-1] if server[-1] == '/' else server
    try:
      service = googleads.common.GetServiceClassForLibrary()(
          self._SOAP_SERVICE_FORMAT % (server, version, service_name),
          self._header_handler,
          _AdManagerPacker,
          self.proxy_config,
          self.timeout,
          version,
          cache=self.cache)
      return service
    except googleads.errors.GoogleAdsSoapTransportError:
      # Distinguish "bad service/version" from genuine transport failures:
      # only re-raise when the combination is known to be valid.
      if version in _SERVICE_MAP:
        if service_name in _SERVICE_MAP[version]:
          raise
        else:
          raise googleads.errors.GoogleAdsValueError(
              'Unrecognized service for the Ad Manager API. Service given: %s '
              'Supported services: %s'
              % (service_name, _SERVICE_MAP[version]))
      else:
        raise googleads.errors.GoogleAdsValueError(
            'Unrecognized version of the Ad Manager API. Version given: %s '
            'Supported versions: %s' % (version, _SERVICE_MAP.keys()))
  def GetDataDownloader(self, version=sorted(_SERVICE_MAP.keys())[-1],
                        server=None):
    """Creates a downloader for Ad Manager reports and PQL result sets.
    This is a convenience method. It is functionally identical to calling
    DataDownloader(ad_manager_client, version, server)
    Args:
      [optional]
      version: A string identifying the Ad Manager version to connect to.
        This defaults to what is currently the latest version. This will be
        updated in future releases to point to what is then the
        latest version.
      server: A string identifying the webserver hosting the Ad Manager API.
    Returns:
      A DataDownloader tied to this AdManagerClient, ready to download reports.
    """
    if not server:
      server = DEFAULT_ENDPOINT
    return DataDownloader(self, version, server)
class _AdManagerHeaderHandler(googleads.common.HeaderHandler):
  """Handler which sets the headers for an Ad Manager SOAP call."""
  # The library signature for Ad Manager, to be appended to all
  # application_names.
  _PRODUCT_SIG = 'DfpApi-Python'
  # The name of the WSDL-defined SOAP Header class used in all requests.
  _SOAP_HEADER_CLASS = 'ns0:SoapRequestHeader'
  def __init__(
      self, ad_manager_client, enable_compression, custom_http_headers=None):
    """Initializes an AdManagerHeaderHandler.
    Args:
      ad_manager_client: The AdManagerClient whose data will be used to fill
        in the headers. We retain a reference to this object so that the
        header handler picks up changes to the client.
      enable_compression: A boolean indicating if you want to enable compression
        of the SOAP response. If True, the SOAP response will use gzip
        compression, and will be decompressed for you automatically.
      custom_http_headers: A dictionary of custom HTTP headers to send with all
        requests.
    """
    self._ad_manager_client = ad_manager_client
    self.enable_compression = enable_compression
    # Normalize None to an empty dict so it can always be update()d from.
    self.custom_http_headers = custom_http_headers or {}
  def GetSOAPHeaders(self, create_method):
    """Returns the SOAP headers required for request authorization.
    Args:
      create_method: The SOAP library specific method used to instantiate SOAP
        objects.
    Returns:
      A SOAP object containing the headers.
    """
    header = create_method(self._SOAP_HEADER_CLASS)
    header.networkCode = self._ad_manager_client.network_code
    # Append the library signature so requests are attributable to this
    # client library version.
    header.applicationName = ''.join([
        self._ad_manager_client.application_name,
        googleads.common.GenerateLibSig(self._PRODUCT_SIG)])
    return header
  def GetHTTPHeaders(self):
    """Returns the HTTP headers required for request authorization.
    Returns:
      A dictionary containing the required headers.
    """
    http_headers = self._ad_manager_client.oauth2_client.CreateHttpHeader()
    # Advertise gzip support; the transport decompresses transparently.
    if self.enable_compression:
      http_headers['accept-encoding'] = 'gzip'
    # Custom headers are applied last so they can override the defaults.
    http_headers.update(self.custom_http_headers)
    return http_headers
class _AdManagerPacker(googleads.common.SoapPacker):
  """A utility applying customized packing logic for Ad Manager."""
  @classmethod
  def Pack(cls, obj, version):
    """Pack the given object using Ad Manager-specific logic.
    Args:
      obj: an object to be packed for SOAP using Ad Manager-specific logic, if
        applicable.
      version: the version of the current API, e.g. 'v201811'
    Returns:
      The given object packed with Ad Manager-specific logic for SOAP,
      if applicable. Otherwise, returns the given object unmodified.
    """
    # Only date/datetime values need translation; everything else passes
    # through untouched.
    if isinstance(obj, (datetime.datetime, datetime.date)):
      return cls.AdManagerDateTimePacker(obj, version)
    return obj
  @classmethod
  def AdManagerDateTimePacker(cls, value, version):
    """Returns dicts formatted for Ad Manager SOAP based on date/datetime.
    Args:
      value: A date or datetime object to be converted.
      version: the version of the current API, e.g. 'v201811'
    Returns:
      The value object correctly represented for Ad Manager SOAP.
      Any other input type falls through and returns None (Pack only
      routes date/datetime values here).
    """
    if isinstance(value, datetime.datetime):
      # Naive datetimes are rejected: the API requires a timeZoneId.
      if value.tzinfo is None:
        raise googleads.errors.GoogleAdsValueError(
            'Datetime %s is not timezone aware.' % value
        )
      return {
          'date': cls.AdManagerDateTimePacker(value.date(), version),
          'hour': value.hour,
          'minute': value.minute,
          'second': value.second,
          # NOTE(review): `.zone` is pytz-specific; a stdlib timezone or
          # dateutil tzinfo has no such attribute -- confirm callers always
          # pass pytz-aware datetimes.
          'timeZoneId': value.tzinfo.zone,
      }
    elif isinstance(value, datetime.date):
      return {'year': value.year, 'month': value.month, 'day': value.day}
@googleads.common.RegisterUtility('StatementBuilder')
class StatementBuilder(object):
  """Provides the ability to programmatically construct PQL queries.

  Clauses are accumulated via the fluent Select/From/Where/OrderBy/Limit/
  Offset/WithBindVariable methods and serialized by ToStatement().
  """
  class _OrderByPair(object):
    """Stores and serializes a pair of column/ascending values."""
    def __init__(self, column, ascending):
      """Initializes a pair of column/ascending values.
      Args:
        column: a string specifying the column name.
        ascending: a boolean specifying sort order ascending or descending.
      """
      self.column = column
      self.ascending = ascending
    def __repr__(self):
      """The string representation of this class is valid PQL."""
      return '%s %s' % (self.column, 'ASC' if self.ascending else 'DESC')
  # printf-style templates for each clause of the generated PQL statement.
  _SELECT_PART = 'SELECT %s FROM %s'
  _WHERE_PART = 'WHERE %s'
  _ORDER_BY_PART = 'ORDER BY %s'
  _LIMIT_PART = 'LIMIT %s'
  _OFFSET_PART = 'OFFSET %s'
  def __init__(self, select_columns=None, from_table=None, where=None,
               order_by=None, order_ascending=True,
               limit=SUGGESTED_PAGE_LIMIT, offset=0,
               version=sorted(_SERVICE_MAP.keys())[-1]):
    """Initializes StatementBuilder.
    Args:
      select_columns: a comma separated string of column names.
      from_table: a string specifying the table to select from.
      where: a string with the where clause.
      order_by: a string with the order by clause.
      order_ascending: a boolean specifying sort order ascending or descending.
      limit: an integer with the limit clause.
      offset: an integer with the offset clause.
      version: A string identifying the Ad Manager version this statement is
          compatible with. This defaults to what is currently the latest
          version. This will be updated in future releases to point to what is
          then the latest version.
    """
    self._select = select_columns
    self._from_ = from_table
    self._where = where
    self.limit = limit
    self.offset = offset
    self._version = version
    if order_by:
      self._order_by = self._OrderByPair(column=order_by,
                                         ascending=order_ascending)
    else:
      self._order_by = None
    self._values = {} # Use a dict to prevent duplicates
  def ToStatement(self):
    """Builds a PQL string from the current state.
    Returns:
      A string representation of the PQL statement.
    """
    # SELECT and FROM must be supplied together or not at all.
    if self._select and not self._from_:
      raise googleads.errors.GoogleAdsError('FROM clause required with SELECT.')
    if self._from_ and not self._select:
      raise googleads.errors.GoogleAdsError('SELECT clause required with FROM.')
    query = []
    if self._select:
      query.append(self._SELECT_PART % (self._select, self._from_))
    if self._where:
      query.append(self._WHERE_PART % self._where)
    if self._order_by:
      query.append(self._ORDER_BY_PART % self._order_by)
    # NOTE: limit uses truthiness (a limit of 0 or None omits the clause),
    # while offset only needs to be non-None, so "OFFSET 0" is emitted.
    if self.limit:
      query.append(self._LIMIT_PART % self.limit)
    if self.offset is not None:
      query.append(self._OFFSET_PART % self.offset)
    return {'query': ' '.join(query),
            'values': (PQLHelper.GetQueryValuesFromDict(
                self._values, self._version) if self._values else None)}
  def Select(self, columns):
    """Adds a SELECT clause.
    Args:
      columns: A comma separated string specifying the columns.
    Returns:
      A reference to the StatementBuilder.
    """
    self._select = columns
    return self
  def From(self, table):
    """Adds a FROM clause.
    Args:
      table: A string specifying the table.
    Returns:
      A reference to the StatementBuilder
    """
    self._from_ = table
    return self
  def Where(self, clause):
    """Adds a WHERE clause.
    Args:
      clause: A string specifying the where clause.
    Returns:
      A reference to the StatementBuilder.
    """
    self._where = clause
    return self
  def Limit(self, limit=SUGGESTED_PAGE_LIMIT):
    """Adds a LIMIT clause.
    Args:
      limit: An integer specifying the limit value.
    Returns:
      A reference to the StatementBuilder.
    """
    self.limit = limit
    return self
  def Offset(self, value):
    """Adds an OFFSET clause.
    Args:
      value: An integer specifying the offset value.
    Returns:
      A reference to the StatementBuilder.
    """
    self.offset = value
    return self
  def OrderBy(self, column, ascending=True):
    """Adds an ORDER BY clause.
    Args:
      column: A string specifying the column to order by.
      ascending: A bool to indicate ascending vs descending.
    Returns:
      A reference to the StatementBuilder
    """
    self._order_by = self._OrderByPair(column=column,
                                       ascending=ascending)
    return self
  def WithBindVariable(self, key, value):
    """Binds a value to a variable in the statement.
    Args:
      key: A string identifying the variable.
      value: A object of an acceptable type specifying the value.
    Returns:
      A reference to the StatementBuilder.
    """
    # Make this call to throw the exception here if there is a problem
    PQLHelper.GetValueRepresentation(value, self._version)
    self._values[key] = value
    return self
class PQLHelper(object):
  """Utility class for PQL."""

  @classmethod
  def GetQueryValuesFromDict(cls, d, version=sorted(_SERVICE_MAP.keys())[-1]):
    """Converts a dict of python types into a list of PQL types.

    Args:
      d: A dictionary of variable names to python types.
      version: A string identifying the Ad Manager version the values object
          is compatible with. Defaults to the latest version this library
          currently knows about.

    Returns:
      A list of variables formatted for PQL statements which are compatible
      with a particular API version.
    """
    return [{'key': var_name,
             'value': cls.GetValueRepresentation(var_value, version)}
            for var_name, var_value in d.items()]

  @classmethod
  def GetValueRepresentation(cls, value,
                             version=sorted(_SERVICE_MAP.keys())[-1]):
    """Converts a single python value to its PQL representation.

    Args:
      value: A python value.
      version: A string identifying the Ad Manager version the value object
          is compatible with. Defaults to the latest version this library
          currently knows about.

    Returns:
      The value formatted for PQL statements which are compatible with a
      particular API version.

    Raises:
      GoogleAdsValueError: on naive datetimes, heterogeneous sets, and types
          with no PQL representation.
    """
    if isinstance(value, str):
      return {'value': value, 'xsi_type': 'TextValue'}
    # bool must be tested before numbers.Number, since every bool is a Number.
    if isinstance(value, bool):
      return {'value': value, 'xsi_type': 'BooleanValue'}
    if isinstance(value, numbers.Number):
      return {'value': value, 'xsi_type': 'NumberValue'}
    # datetime.datetime must be tested before datetime.date, since every
    # datetime instance is also a date instance.
    if isinstance(value, datetime.datetime):
      if value.tzinfo is None:
        raise googleads.errors.GoogleAdsValueError(
            'Datetime %s is not timezone aware.' % value)
      return {
          'xsi_type': 'DateTimeValue',
          'value': {
              'date': {'year': value.year,
                       'month': value.month,
                       'day': value.day},
              'hour': value.hour,
              'minute': value.minute,
              'second': value.second,
              'timeZoneId': value.tzinfo.zone,
          }
      }
    if isinstance(value, datetime.date):
      return {
          'xsi_type': 'DateValue',
          'value': {'year': value.year,
                    'month': value.month,
                    'day': value.day}
      }
    if isinstance(value, list):
      if value:
        element_type = type(value[0])
        # A PQL SetValue must be homogeneous.
        if any(not isinstance(item, element_type) for item in value):
          raise googleads.errors.GoogleAdsValueError(
              'Cannot pass more than one type in a set.')
      return {
          'xsi_type': 'SetValue',
          'values': [cls.GetValueRepresentation(item, version)
                     for item in value]
      }
    raise googleads.errors.GoogleAdsValueError(
        'Can\'t represent unknown type: %s.' % type(value))
@googleads.common.RegisterUtility('FilterStatement')
class FilterStatement(object):
  """A statement object for PQL and get*ByStatement queries.

  Wraps a where clause together with paging state (limit/offset). When no
  limit is supplied explicitly, queries are capped at the suggested page
  limit.
  """

  def __init__(self, where_clause='', values=None, limit=SUGGESTED_PAGE_LIMIT,
               offset=0):
    self.where_clause = where_clause
    self.values = values
    self.limit = limit
    self.offset = offset

  def ToStatement(self):
    """Returns this statement object in the format Ad Manager requires."""
    query = '%s LIMIT %d OFFSET %d' % (self.where_clause, self.limit,
                                       self.offset)
    return {'query': query, 'values': self.values}
class DataDownloader(object):
  """A utility that can be used to download reports and PQL result sets."""
  def __init__(self, ad_manager_client, version=sorted(_SERVICE_MAP.keys())[-1],
               server=None):
    """Initializes a DataDownloader.
    Args:
      ad_manager_client: The AdManagerClient whose attributes will be used to
          authorize your report download and PQL query requests.
      [optional]
      version: A string identifying the Ad Manager version to connect to.
          This defaults to what is currently the latest version. This will be
          updated in future releases to point to what is then the
          latest version.
      server: A string identifying the webserver hosting the Ad Manager API.
    """
    if not server:
      server = DEFAULT_ENDPOINT
    # Strip a trailing slash so later concatenation stays consistent.
    if server[-1] == '/':
      server = server[:-1]
    self._ad_manager_client = ad_manager_client
    self._version = version
    self._server = server
    # Service stubs are created lazily by _GetReportService/_GetPqlService.
    self._report_service = None
    self._pql_service = None
    self.proxy_config = self._ad_manager_client.proxy_config
    handlers = self.proxy_config.GetHandlers()
    # Opener used for raw report downloads, honoring any proxy handlers and
    # custom headers configured on the client.
    self.url_opener = build_opener(*handlers)
    if self._ad_manager_client.custom_http_headers:
      self.url_opener.addheaders.extend(
          self._ad_manager_client.custom_http_headers.items())
  def _GetReportService(self):
    """Lazily initializes a report service client."""
    if not self._report_service:
      self._report_service = self._ad_manager_client.GetService(
          'ReportService', self._version, self._server)
    return self._report_service
  def _GetPqlService(self):
    """Lazily initializes a PQL service client."""
    if not self._pql_service:
      self._pql_service = self._ad_manager_client.GetService(
          'PublisherQueryLanguageService', self._version, self._server)
    return self._pql_service
  def WaitForReport(self, report_job):
    """Runs a report, then waits (blocks) for the report to finish generating.
    Args:
      report_job: The report job to wait for. This may be a dictionary or an
          instance of the SOAP ReportJob class.
    Returns:
      The completed report job's ID as a string.
    Raises:
      An AdManagerReportError if the report job fails to complete.
    """
    service = self._GetReportService()
    report_job_id = service.runReportJob(report_job)['id']
    # Lexicographic string comparison; works because versions are named
    # 'vYYYYMM'. Versions after v201502 expose getReportJobStatus directly.
    if self._version > 'v201502':
      status = service.getReportJobStatus(report_job_id)
    else:
      status = service.getReportJob(report_job_id)['reportJobStatus']
    # Poll every 30 seconds until the job reaches a terminal state.
    while status != 'COMPLETED' and status != 'FAILED':
      _data_downloader_logger.debug('Report job status: %s', status)
      time.sleep(30)
      if self._version > 'v201502':
        status = service.getReportJobStatus(report_job_id)
      else:
        status = service.getReportJob(report_job_id)['reportJobStatus']
    if status == 'FAILED':
      raise googleads.errors.AdManagerReportError(report_job_id)
    else:
      _data_downloader_logger.debug('Report has completed successfully')
      return report_job_id
  def DownloadReportToFile(self, report_job_id, export_format, outfile,
                           include_report_properties=False,
                           include_totals_row=None, use_gzip_compression=True):
    """Downloads report data and writes it to a file.
    The report job must be completed before calling this function.
    Args:
      report_job_id: The ID of the report job to wait for, as a string.
      export_format: The export format for the report file, as a string.
      outfile: A writeable, file-like object to write to.
      include_report_properties: Whether or not to include the report
          properties (e.g. network, user, date generated...)
          in the generated report.
      include_totals_row: Whether or not to include the totals row.
      use_gzip_compression: Whether or not to use gzip compression.
    """
    service = self._GetReportService()
    if include_totals_row is None: # True unless CSV export if not specified
      include_totals_row = True if export_format != 'CSV_DUMP' else False
    opts = {
      'exportFormat': export_format,
      'includeReportProperties': include_report_properties,
      'includeTotalsRow': include_totals_row,
      'useGzipCompression': use_gzip_compression
    }
    report_url = service.getReportDownloadUrlWithOptions(report_job_id, opts)
    _data_downloader_logger.info('Request Summary: Report job ID: %s, %s',
                                 report_job_id, opts)
    response = self.url_opener.open(report_url)
    _data_downloader_logger.debug(
        'Incoming response: %s %s REDACTED REPORT DATA', response.code,
        response.msg)
    # Stream the body to outfile in fixed-size chunks to bound memory use.
    while True:
      chunk = response.read(_CHUNK_SIZE)
      if not chunk: break
      outfile.write(chunk)
  def DownloadPqlResultToList(self, pql_query, values=None):
    """Downloads the results of a PQL query to a list.
    Args:
      pql_query: str a statement filter to apply (the query should not include
                 the limit or the offset)
      [optional]
      values: A dict of python objects or a list of raw SOAP values to bind
              to the pql_query.
    Returns:
      a list of lists with the first being the header row and each subsequent
      list being a row of results.
    """
    results = []
    self._PageThroughPqlSet(pql_query, results.append, values)
    return results
  def DownloadPqlResultToCsv(self, pql_query, file_handle, values=None):
    """Downloads the results of a PQL query to CSV.
    Args:
      pql_query: str a statement filter to apply (the query should not include
                 the limit or the offset)
      file_handle: file the file object to write to.
      [optional]
      values: A dict of python objects or a list of raw SOAP values to bind
              to the pql_query.
    """
    pql_writer = csv.writer(file_handle, delimiter=',',
                            quotechar='"', quoting=csv.QUOTE_ALL)
    self._PageThroughPqlSet(pql_query, pql_writer.writerow, values)
  def _ConvertValueForCsv(self, pql_value):
    """Sanitizes a field value from a Value object to a CSV suitable format.
    Args:
      pql_value: dict a dictionary containing the data for a single field of an
                 entity.
    Returns:
      str a CSV writer friendly value formatted by Value.Type.
    """
    if 'value' in pql_value:
      field = pql_value['value']
    elif 'values' in pql_value:
      field = pql_value['values']
    else:
      field = None
    # NOTE: falsy payloads (0, '', empty list) also take the '-' branch below.
    if field:
      if isinstance(field, list):
        # Set values must be homogeneous; mixed element types are rejected.
        if all(AdManagerClassType(single_field) == AdManagerClassType(field[0])
               for single_field in field):
          return ','.join([
              '"%s"' % str(self._ConvertValueForCsv(single_field))
              for single_field in field])
        else:
          raise googleads.errors.GoogleAdsValueError(
              'The set value returned contains unsupported mix value types')
      class_type = AdManagerClassType(pql_value)
      if class_type == 'TextValue':
        # Double up embedded quotes per CSV quoting rules.
        s = field.replace('"', '""')
        # Encode UTF-8 characters for Python 2 only.
        if sys.version_info.major < 3:
          s = s.encode('UTF8')
        return s
      elif class_type == 'NumberValue':
        return float(field) if '.' in field else int(field)
      elif class_type == 'DateTimeValue':
        return self._ConvertDateTimeToOffset(field)
      elif class_type == 'DateValue':
        return datetime.date(int(field['date']['year']),
                             int(field['date']['month']),
                             int(field['date']['day'])).isoformat()
      else:
        return field
    else:
      return '-'
  def _PageThroughPqlSet(self, pql_query, output_function, values):
    """Pages through a pql_query and performs an action (output_function).
    Args:
      pql_query: str a statement filter to apply (the query should not include
                 the limit or the offset)
      output_function: the function to call to output the results (csv or in
                       memory)
      values: A dict of python objects or a list of raw SOAP values to bind
              to the pql_query.
    """
    if isinstance(values, dict):
      values = PQLHelper.GetQueryValuesFromDict(values, self._version)
    pql_service = self._GetPqlService()
    current_offset = 0
    # Fetch pages of SUGGESTED_PAGE_LIMIT rows until a short page signals the
    # end of the result set.
    while True:
      query_w_limit_offset = '%s LIMIT %d OFFSET %d' % (pql_query,
                                                        SUGGESTED_PAGE_LIMIT,
                                                        current_offset)
      response = pql_service.select({'query': query_w_limit_offset,
                                     'values': values})
      if 'rows' in response:
        # Write the header row only on first pull
        if current_offset == 0:
          header = response['columnTypes']
          output_function([label['labelName'] for label in header])
        entities = response['rows']
        result_set_size = len(entities)
        for entity in entities:
          output_function([self._ConvertValueForCsv(value) for value
                           in entity['values']])
        current_offset += result_set_size
        if result_set_size != SUGGESTED_PAGE_LIMIT:
          break
      else:
        break
  def _ConvertDateTimeToOffset(self, date_time_value):
    """Converts the PQL formatted response for a dateTime object.
    Output conforms to ISO 8061 format, e.g. 'YYYY-MM-DDTHH:MM:SSz.'
    Args:
      date_time_value: dict The date time value from the PQL response.
    Returns:
      str: A string representation of the date time value uniform to
           ReportService.
    """
    date_time_obj = datetime.datetime(int(date_time_value['date']['year']),
                                      int(date_time_value['date']['month']),
                                      int(date_time_value['date']['day']),
                                      int(date_time_value['hour']),
                                      int(date_time_value['minute']),
                                      int(date_time_value['second']))
    date_time_str = pytz.timezone(
        date_time_value['timeZoneId']).localize(date_time_obj).isoformat()
    # Normalize a UTC '+00:00' offset to the shorter 'Z' suffix.
    if date_time_str[-5:] == '00:00':
      return date_time_str[:-6] + 'Z'
    else:
      return date_time_str
def AdManagerClassType(value):
  """Returns the class type for an object.

  Args:
    value: generic object to return type for.

  Returns:
    str: A string representation of the value response type.
  """
  value_class = value.__class__
  return value_class.__name__
|
SCOAP3/invenio | refs/heads/master | invenio/modules/sequencegenerator/models.py | 18 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
SeqUtils database models.
"""
from invenio.ext.sqlalchemy import db
class SeqSTORE(db.Model):
    """Represents a SeqSTORE record."""
    __tablename__ = 'seqSTORE'
    # Surrogate primary key: unsigned auto-increment integer.
    id = db.Column(
        db.Integer(15, unsigned=True),
        primary_key=True, nullable=False,
        autoincrement=True
    )
    # Name of the sequence this row belongs to (max 15 chars).
    seq_name = db.Column(db.String(15))
    # Sequence value, stored as a string (max 20 chars) — presumably the
    # generated value for the named sequence; verify against the generator.
    seq_value = db.Column(db.String(20))
    # Each (seq_name, seq_value) pair may only occur once.
    __table_args__ = (db.Index('seq_name_value', seq_name, seq_value,
                               unique=True),
                      db.Model.__table_args__)
__all__ = ['SeqSTORE']
|
Ubuntu-Solutions-Engineering/conjure | refs/heads/master | conjureup/controllers/base/showsteps/tui.py | 9 | from conjureup import controllers
class ShowStepsController:
    """Text-UI controller for the "show steps" screen.

    Rendering immediately forwards to the 'configapps' controller.
    """

    def render(self):
        # Nothing is drawn here; hand control to the next screen in the flow.
        controllers.use('configapps').render()
_controller_class = ShowStepsController
|
googlearchive/big-rig | refs/heads/master | app/src/thirdparty/telemetry/internal/backends/facebook_credentials_backend.py | 31 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.internal.backends import form_based_credentials_backend
class FacebookCredentialsBackend(
    form_based_credentials_backend.FormBasedCredentialsBackend):
  """Form-based credentials backend for the facebook.com login page."""

  @property
  def credentials_type(self):
    return 'facebook'

  @property
  def url(self):
    return 'http://www.facebook.com/'

  @property
  def login_form_id(self):
    return 'login_form'

  @property
  def login_input_id(self):
    return 'email'

  @property
  def password_input_id(self):
    return 'pass'

  @property
  def logged_in_javascript(self):
    """Evaluates to true iff already logged in."""
    # Either the desktop notifications list or the mobile home notice being
    # present indicates an authenticated session.
    return ('document.getElementById("fbNotificationsList")!== null || '
            'document.getElementById("m_home_notice")!== null')
class FacebookCredentialsBackend2(FacebookCredentialsBackend):
  """Facebook credentials backend that logs in over HTTPS."""

  @property
  def url(self):
    return 'https://www.facebook.com/'

  @property
  def credentials_type(self):
    return 'facebook2'
|
tobikausk/nest-simulator | refs/heads/master | pynest/examples/gap_junctions_two_neurons.py | 13 | # -*- coding: utf-8 -*-
#
# gap_junctions_two_neurons.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Gap Junctions: Two neuron example
---------------------------------
This script simulates two Hodgkin-Huxley neurons of type `hh_psc_alpha_gap`
connected by a gap junction. Both neurons receive a constant current of
100.0 pA. The neurons are initialized with different membrane potentials and
synchronize over time due to the gap-junction connection.
"""
import nest
import pylab as pl
import numpy
# Start from a clean simulation kernel.
nest.ResetKernel()
"""
First we set the resolution of the simulation, create two neurons and
create a `voltmeter` for recording.
"""
nest.SetKernelStatus({'resolution': 0.05})
neuron = nest.Create('hh_psc_alpha_gap', 2)
# Record V_m in memory every 0.1 ms (no file output).
vm = nest.Create('voltmeter', params={'to_file': False,
                                      'withgid': True,
                                      'withtime': True,
                                      'interval': 0.1})
"""
Then we set the constant current input, modify the inital membrane
potential of one of the neurons and connect the neurons to the `voltmeter`.
"""
nest.SetStatus(neuron, {'I_e': 100.})
# Give the first neuron a different starting V_m so synchronization via the
# gap junction is visible in the plot.
nest.SetStatus([neuron[0]], {'V_m': -10.})
nest.Connect(vm, neuron, 'all_to_all')
"""
In order to create the `gap_junction` connection we employ the `all_to_all`
connection rule: Gap junctions are bidirectional connections, therefore we
need to connect `neuron[0]` to `neuron[1]` and `neuron[1]` to `neuron[0]`:
"""
nest.Connect(neuron, neuron,
             {'rule': 'all_to_all', 'autapses': False},
             {'model': 'gap_junction', 'weight': 0.5})
"""
Finally we start the simulation and plot the membrane potentials of
both neurons.
"""
nest.Simulate(351.)
senders = nest.GetStatus(vm, 'events')[0]['senders']
times = nest.GetStatus(vm, 'events')[0]['times']
V = nest.GetStatus(vm, 'events')[0]['V_m']
pl.figure(1)
# One trace per neuron, selected by sender GID.
pl.plot(times[numpy.where(senders == 1)],
        V[numpy.where(senders == 1)], 'r-')
pl.plot(times[numpy.where(senders == 2)],
        V[numpy.where(senders == 2)], 'g-')
pl.xlabel('time (ms)')
pl.ylabel('membrane potential (mV)')
pl.show()
|
postrational/django | refs/heads/master | tests/reserved_names/models.py | 109 | """
18. Using SQL reserved names
Need to use a reserved SQL name as a column name or table name? Need to include
a hyphen in a column or table name? No problem. Django quotes names
appropriately behind the scenes, so your database won't complain about
reserved-name usage.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Thing(models.Model):
    # Every field below is deliberately named after an SQL reserved word (or
    # contains a hyphen) to exercise Django's identifier quoting.
    when = models.CharField(max_length=1, primary_key=True)
    join = models.CharField(max_length=1)
    like = models.CharField(max_length=1)
    drop = models.CharField(max_length=1)
    alter = models.CharField(max_length=1)
    having = models.CharField(max_length=1)
    where = models.DateField(max_length=1)
    # Hyphenated column names must also be quoted correctly.
    has_hyphen = models.CharField(max_length=1, db_column='has-hyphen')
    class Meta:
        # 'select' is itself a reserved word, so the table name needs quoting.
        db_table = 'select'
    def __str__(self):
        return self.when
|
neilh10/micropython | refs/heads/master | tests/basics/getattr1.py | 105 | class A:
var = 132
def __init__(self):
self.var2 = 34
def meth(self, i):
return 42 + i
a = A()
print(getattr(a, "var"))
print(getattr(a, "var2"))
print(getattr(a, "meth")(5))
print(getattr(a, "_none_such", 123))
print(getattr(list, "foo", 456))
print(getattr(a, "va" + "r2"))
|
OpenDMM/bitbake | refs/heads/master | lib/bb/fetch2/perforce.py | 1 | # ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'Fetch' implementations
Classes for obtaining upstream sources for the
BitBake build tools.
"""
# Copyright (C) 2003, 2004 Chris Larson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig
from future_builtins import zip
import os
import subprocess
import logging
import bb
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import logger
from bb.fetch2 import runfetchcmd
class Perforce(FetchMethod):
    """Fetch implementation for p4:// urls (Perforce SCM)."""
    def supports(self, ud, d):
        """Check to see if a given url can be fetched with perforce."""
        return ud.type in ['p4']
    def doparse(url, d):
        """Parse a p4 url into its components.

        Returns:
            (host, path, user, pswd, parm) where parm holds any ;key=value
            options embedded in the path plus the resolved changeset ('cset').
        """
        parm = {}
        path = url.split("://")[1]
        delim = path.find("@")
        if delim != -1:
            # Credentials embedded in the url: user:pswd:host:port@path
            (user, pswd, host, port) = path.split('@')[0].split(":")
            path = path.split('@')[1]
        else:
            # No credentials in the url; take host:port from P4PORT.
            (host, port) = data.getVar('P4PORT', d).split(':')
            user = ""
            pswd = ""
        if path.find(";") != -1:
            # Options are appended to the path as ;key=value pairs.
            keys = []
            values = []
            plist = path.split(';')
            for item in plist:
                if item.count('='):
                    (key, value) = item.split('=')
                    keys.append(key)
                    values.append(value)
            parm = dict(zip(keys, values))
        path = "//" + path.split(';')[0]
        host += ":%s" % (port)
        parm["cset"] = Perforce.getcset(d, path, host, user, pswd, parm)
        return host, path, user, pswd, parm
    doparse = staticmethod(doparse)
    def getcset(d, depot, host, user, pswd, parm):
        """Return the newest changeset number for depot, honouring any
        'revision'/'label' parameter or P4DATE restriction.

        Returns -1 if 'p4 changes' produced no output.
        """
        p4opt = ""
        if "cset" in parm:
            # Already resolved by a previous doparse() call.
            return parm["cset"]
        if user:
            p4opt += " -u %s" % (user)
        if pswd:
            p4opt += " -P %s" % (pswd)
        if host:
            p4opt += " -p %s" % (host)
        p4date = data.getVar("P4DATE", d, True)
        if "revision" in parm:
            depot += "#%s" % (parm["revision"])
        elif "label" in parm:
            depot += "@%s" % (parm["label"])
        elif p4date:
            depot += "@%s" % (p4date)
        p4cmd = data.getVar('FETCHCOMMAND_p4', d, True)
        logger.debug(1, "Running %s%s changes -m 1 %s", p4cmd, p4opt, depot)
        p4file, errors = bb.process.run("%s%s changes -m 1 %s" % (p4cmd, p4opt, depot))
        cset = p4file.strip()
        logger.debug(1, "READ %s", cset)
        if not cset:
            return -1
        # Output looks like "Change <num> on <date> ..." — take the number.
        return cset.split(' ')[1]
    getcset = staticmethod(getcset)
    def urldata_init(self, ud, d):
        """Derive the local tarball name (ud.localfile) for this url."""
        (host, path, user, pswd, parm) = Perforce.doparse(ud.url, d)
        # If a label is specified, we use that as our filename
        if "label" in parm:
            ud.localfile = "%s.tar.gz" % (parm["label"])
            return
        base = path
        which = path.find('/...')
        if which != -1:
            base = path[:which-1]
        base = self._strip_leading_slashes(base)
        cset = Perforce.getcset(d, path, host, user, pswd, parm)
        ud.localfile = data.expand('%s+%s+%s.tar.gz' % (host, base.replace('/', '.'), cset), d)
    def download(self, ud, d):
        """Fetch the depot contents with 'p4 print' and pack them into the
        local tarball (ud.localpath)."""
        (host, depot, user, pswd, parm) = Perforce.doparse(ud.url, d)
        if depot.find('/...') != -1:
            path = depot[:depot.find('/...')]
        else:
            path = depot
        module = parm.get('module', os.path.basename(path))
        localdata = data.createCopy(d)
        data.setVar('OVERRIDES', "p4:%s" % data.getVar('OVERRIDES', localdata), localdata)
        data.update_data(localdata)
        # Get the p4 command
        p4opt = ""
        if user:
            p4opt += " -u %s" % (user)
        if pswd:
            p4opt += " -P %s" % (pswd)
        if host:
            p4opt += " -p %s" % (host)
        p4cmd = data.getVar('FETCHCOMMAND', localdata, True)
        # create temp directory
        logger.debug(2, "Fetch: creating temporary directory")
        bb.utils.mkdirhier(data.expand('${WORKDIR}', localdata))
        data.setVar('TMPBASE', data.expand('${WORKDIR}/oep4.XXXXXX', localdata), localdata)
        tmpfile, errors = bb.process.run(data.getVar('MKTEMPDIRCMD', localdata, True) or "false")
        tmpfile = tmpfile.strip()
        if not tmpfile:
            raise FetchError("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.", ud.url)
        if "label" in parm:
            depot = "%s@%s" % (depot, parm["label"])
        else:
            cset = Perforce.getcset(d, depot, host, user, pswd, parm)
            depot = "%s@%s" % (depot, cset)
        # NOTE(review): changes the process cwd and never restores it
        # (pre-existing behaviour, kept as-is).
        os.chdir(tmpfile)
        logger.info("Fetch " + ud.url)
        logger.info("%s%s files %s", p4cmd, p4opt, depot)
        p4file, errors = bb.process.run("%s%s files %s" % (p4cmd, p4opt, depot))
        p4file = [f.rstrip() for f in p4file.splitlines()]
        if not p4file:
            raise FetchError("Fetch: unable to get the P4 files from %s" % depot, ud.url)
        count = 0
        for file in p4file:
            list = file.split()
            if list[2] == "delete":
                continue
            dest = list[0][len(path)+1:]
            where = dest.find("#")
            subprocess.call("%s%s print -o %s/%s %s" % (p4cmd, p4opt, module, dest[:where], list[0]), shell=True)
            count = count + 1
        if count == 0:
            # BUGFIX: logger.error() was previously called with no arguments,
            # which raises TypeError and masked the FetchError below.
            logger.error("Fetch: No files gathered from the P4 fetch")
            raise FetchError("Fetch: No files gathered from the P4 fetch", ud.url)
        runfetchcmd("tar -czf %s %s" % (ud.localpath, module), d, cleanup = [ud.localpath])
        # cleanup
        bb.utils.prunedir(tmpfile)
|
XhmikosR/mpc-hc | refs/heads/develop | src/mpc-hc/mpcresources/UpdateISPO.py | 21 | # (C) 2013, 2015 see Authors.txt
#
# This file is part of MPC-HC.
#
# MPC-HC is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# MPC-HC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from TranslationDataIS import *
def UpdateISPO(filename):
    """Refresh the Inno Setup .po file for *filename* from the .pot template,
    carrying over the existing translations."""
    base_name = filename.replace('.strings', '')
    po_path = 'PO\\' + base_name
    load_flags = (False, False, True)

    # Existing translations for this language.
    old_translations = TranslationDataIS()
    old_translations.loadFromPO(po_path, 'po', load_flags)

    # Fresh template, translated from the old data, written back in place.
    merged = TranslationDataIS()
    merged.loadFromPO(r'PO\mpc-hc.installer', 'pot', load_flags)
    merged.translate(old_translations)
    merged.writePO(po_path, 'po', load_flags)
if __name__ == '__main__':
    if len(sys.argv) != 2:
        # BUGFIX: the RuntimeError was previously constructed but never
        # raised, so the script fell through and crashed on sys.argv[1].
        raise RuntimeError('Invalid number of parameters. Usage: UpdateISPO.py <filename>')
    UpdateISPO(sys.argv[1])
|
lianliuwei/gyp | refs/heads/master | pylib/gyp/generator/make.py | 1 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This is all roughly based on the Makefile system used by the Linux
# kernel, but is a non-recursive make -- we put the entire dependency
# graph in front of make and let it figure it out.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level Makefile. This means that all
# variables in .mk-files clobber one another. Be careful to use :=
# where appropriate for immediate evaluation, and similarly to watch
# that you're not relying on a variable value to last between different
# .mk files.
#
# TODOs:
#
# Global settings and utility functions are currently stuffed in the
# toplevel Makefile. It may make sense to generate some .mk files on
# the side to keep the files readable.
import os
import re
import sys
import gyp
import gyp.common
import gyp.system_test
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
# Mapping from gyp placeholder variables to their make-syntax expansions.
generator_default_variables = {
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'SHARED_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/$(TARGET)/geni',
  'SHARED_INTERMEDIATE_DIR': '$(obj)/gen',
  'PRODUCT_DIR': '$(builddir)',
  'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
  'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
  'RULE_INPUT_PATH': '$(abspath $<)',
  'RULE_INPUT_EXT': '$(suffix $<)',
  'RULE_INPUT_NAME': '$(notdir $<)',
  'CONFIGURATION_NAME': '$(BUILDTYPE)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Request sorted dependencies in the order from dependents to dependencies.
# Off by default; flipped on by CalculateGeneratorInputInfo for Android NDK.
generator_wants_sorted_dependencies = False
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
def CalculateVariables(default_variables, params):
  """Calculate additional variables for use in the build (called by gyp)."""
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'mac':
    default_variables.setdefault('OS', 'mac')
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
    default_variables.setdefault('SHARED_LIB_DIR',
                                 generator_default_variables['PRODUCT_DIR'])
    default_variables.setdefault('LIB_DIR',
                                 generator_default_variables['PRODUCT_DIR'])
    # Copy additional generator configuration data from Xcode, which is shared
    # by the Mac Make generator.
    # NOTE: the three module-level generator_* lists are rebound here, so this
    # function has side effects on this module's globals.
    import gyp.generator.xcode as xcode_generator
    global generator_additional_non_configuration_keys
    generator_additional_non_configuration_keys = getattr(xcode_generator,
        'generator_additional_non_configuration_keys', [])
    global generator_additional_path_sections
    generator_additional_path_sections = getattr(xcode_generator,
        'generator_additional_path_sections', [])
    global generator_extra_sources_for_rules
    generator_extra_sources_for_rules = getattr(xcode_generator,
        'generator_extra_sources_for_rules', [])
    # COMPILABLE_EXTENSIONS is presumably a module-level dict defined later in
    # this file — register Objective-C(++) sources for the mac build.
    COMPILABLE_EXTENSIONS.update({'.m': 'objc', '.mm' : 'objcxx'})
  else:
    operating_system = flavor
    if flavor == 'android':
      operating_system = 'linux' # Keep this legacy behavior for now.
    default_variables.setdefault('OS', operating_system)
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
    default_variables.setdefault('SHARED_LIB_DIR','$(builddir)/lib.$(TOOLSET)')
    default_variables.setdefault('LIB_DIR', '$(obj).$(TOOLSET)')
def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  flags = params.get('generator_flags', {})
  ndk_version = flags.get('android_ndk_version', None)
  if not ndk_version:
    return
  # The Android NDK requires a strict link order, which in turn requires
  # dependencies sorted from dependents to dependencies.
  global generator_wants_sorted_dependencies
  generator_wants_sorted_dependencies = True
def ensure_directory_exists(path):
  """Create the directory containing |path| if it does not already exist.

  Robust against concurrent creation: if another process (e.g. a parallel
  gyp invocation) creates the directory between our existence check and
  os.makedirs(), the resulting OSError is ignored as long as the directory
  actually exists afterwards.
  """
  dir = os.path.dirname(path)
  if dir and not os.path.exists(dir):
    try:
      os.makedirs(dir)
    except OSError:
      # Lost a race with another creator; only re-raise if the directory
      # still isn't there (a genuine failure such as EACCES).
      if not os.path.isdir(dir):
        raise
# The .d checking code below uses these functions:
# wildcard, sort, foreach, shell, wordlist
# wildcard can handle spaces, the rest can't.
# Since I could find no way to make foreach work with spaces in filenames
# correctly, the .d files have spaces replaced with another character. The .d
# file for
#   Chromium\ Framework.framework/foo
# is for example
#   out/Release/.deps/out/Release/Chromium?Framework.framework/foo
# This is the replacement character.
# NOTE: '?' was chosen because it is unlikely to appear in real file names
# while still being a single, shell-safe character inside a make variable.
SPACE_REPLACEMENT = '?'
LINK_COMMANDS_LINUX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) $(ARFLAGS.$(TOOLSET)) $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
# We support two kinds of shared objects (.so):
# 1) shared_library, which is just bundling together many dependent libraries
# into a link line.
# 2) loadable_module, which is generating a module intended for dlopen().
#
# They differ only slightly:
# In the former case, we want to package all dependent code into the .so.
# In the latter case, we want to package just the API exposed by the
# outermost module.
# This means shared_library uses --whole-archive, while loadable_module doesn't.
# (Note that --whole-archive is incompatible with the --start-group used in
# normal linking.)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
"""
LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
# TODO(thakis): Find out and document the difference between shared_library and
# loadable_module on mac.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
# TODO(thakis): The solink_module rule is likely wrong. Xcode seems to pass
# -bundle -single_module here (for osmesa.so).
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_ANDROID = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) $(ARFLAGS.$(TOOLSET)) $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
quiet_cmd_link_host = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
cmd_link_host = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
quiet_cmd_solink_module_host = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module_host = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
# Header of toplevel Makefile.
# This should go into the build tree, but it's easier to keep it here for now.
SHARED_HEADER = ("""\
# We borrow heavily from the kernel build setup, though we are simpler since
# we don't have Kconfig tweaking settings on us.
# The implicit make rules have it looking for RCS files, among other things.
# We instead explicitly write all the rules we care about.
# It's even quicker (saves ~200ms) to pass -r on the command line.
MAKEFLAGS=-r
# The source directory tree.
srcdir := %(srcdir)s
abs_srcdir := $(abspath $(srcdir))
# The name of the builddir.
builddir_name ?= %(builddir)s
# The V=1 flag on command line makes us verbosely print command lines.
ifdef V
quiet=
else
quiet=quiet_
endif
# Specify BUILDTYPE=Release on the command line for a release build.
BUILDTYPE ?= %(default_configuration)s
# Directory all our build output goes into.
# Note that this must be two directories beneath src/ for unit tests to pass,
# as they reach into the src/ directory for data with relative paths.
builddir ?= $(builddir_name)/$(BUILDTYPE)
abs_builddir := $(abspath $(builddir))
depsdir := $(builddir)/.deps
# Object output directory.
obj := $(builddir)/obj
abs_obj := $(abspath $(obj))
# We build up a list of every single one of the targets so we can slurp in the
# generated dependency rule Makefiles in one pass.
all_deps :=
%(make_global_settings)s
# C++ apps need to be linked with g++.
#
# Note: flock is used to seralize linking. Linking is a memory-intensive
# process so running parallel links can often lead to thrashing. To disable
# the serialization, override LINK via an envrionment variable as follows:
#
# export LINK=g++
#
# This will allow make to invoke N linker processes as specified in -jN.
LINK ?= %(flock)s $(builddir)/linker.lock $(CXX)
CC.target ?= %(CC.target)s
CFLAGS.target ?= $(CFLAGS)
CXX.target ?= %(CXX.target)s
CXXFLAGS.target ?= $(CXXFLAGS)
LINK.target ?= %(LINK.target)s
LDFLAGS.target ?= $(LDFLAGS)
AR.target ?= $(AR)
ARFLAGS.target ?= %(ARFLAGS.target)s
# N.B.: the logic of which commands to run should match the computation done
# in gyp's make.py where ARFLAGS.host etc. is computed.
# TODO(evan): move all cross-compilation logic to gyp-time so we don't need
# to replicate this environment fallback in make as well.
CC.host ?= %(CC.host)s
CFLAGS.host ?=
CXX.host ?= %(CXX.host)s
CXXFLAGS.host ?=
LINK.host ?= %(LINK.host)s
LDFLAGS.host ?=
AR.host ?= %(AR.host)s
ARFLAGS.host := %(ARFLAGS.host)s
# Define a dir function that can handle spaces.
# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions
# "leading spaces cannot appear in the text of the first argument as written.
# These characters can be put into the argument value by variable substitution."
empty :=
space := $(empty) $(empty)
# http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces
replace_spaces = $(subst $(space),""" + SPACE_REPLACEMENT + """,$1)
unreplace_spaces = $(subst """ + SPACE_REPLACEMENT + """,$(space),$1)
dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1)))
# Flags to make gcc output dependency info. Note that you need to be
# careful here to use the flags that ccache and distcc can understand.
# We write to a dep file on the side first and then rename at the end
# so we can't end up with a broken dep file.
depfile = $(depsdir)/$(call replace_spaces,$@).d
DEPFLAGS = -MMD -MF $(depfile).raw
# We have to fixup the deps output in a few ways.
# (1) the file output should mention the proper .o file.
# ccache or distcc lose the path to the target, so we convert a rule of
# the form:
# foobar.o: DEP1 DEP2
# into
# path/to/foobar.o: DEP1 DEP2
# (2) we want missing files not to cause us to fail to build.
# We want to rewrite
# foobar.o: DEP1 DEP2 \\
# DEP3
# to
# DEP1:
# DEP2:
# DEP3:
# so if the files are missing, they're just considered phony rules.
# We have to do some pretty insane escaping to get those backslashes
# and dollar signs past make, the shell, and sed at the same time.
# Doesn't work with spaces, but that's fine: .d files have spaces in
# their names replaced with other characters."""
r"""
define fixup_dep
# The depfile may not exist if the input file didn't have any #includes.
touch $(depfile).raw
# Fixup path as in (1).
sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile)
# Add extra rules as in (2).
# We remove slashes and replace spaces with new lines;
# remove blank lines;
# delete the first line and append a colon to the remaining lines.
sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\
grep -v '^$$' |\
sed -e 1d -e 's|$$|:|' \
>> $(depfile)
rm $(depfile).raw
endef
"""
"""
# Command definitions:
# - cmd_foo is the actual command to run;
# - quiet_cmd_foo is the brief-output summary of the command.
quiet_cmd_cc = CC($(TOOLSET)) $@
cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_cxx = CXX($(TOOLSET)) $@
cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
%(extra_commands)s
quiet_cmd_touch = TOUCH $@
cmd_touch = touch $@
quiet_cmd_copy = COPY $@
# send stderr to /dev/null to ignore messages when linking directories.
cmd_copy = ln -f "$<" "$@" 2>/dev/null || (rm -rf "$@" && cp -af "$<" "$@")
%(link_commands)s
"""
r"""
# Define an escape_quotes function to escape single quotes.
# This allows us to handle quotes properly as long as we always use
# use single quotes and escape_quotes.
escape_quotes = $(subst ','\'',$(1))
# This comment is here just to include a ' to unconfuse syntax highlighting.
# Define an escape_vars function to escape '$' variable syntax.
# This allows us to read/write command lines with shell variables (e.g.
# $LD_LIBRARY_PATH), without triggering make substitution.
escape_vars = $(subst $$,$$$$,$(1))
# Helper that expands to a shell command to echo a string exactly as it is in
# make. This uses printf instead of echo because printf's behaviour with respect
# to escape sequences is more portable than echo's across different shells
# (e.g., dash, bash).
exact_echo = printf '%%s\n' '$(call escape_quotes,$(1))'
"""
"""
# Helper to compare the command we're about to run against the command
# we logged the last time we ran the command. Produces an empty
# string (false) when the commands match.
# Tricky point: Make has no string-equality test function.
# The kernel uses the following, but it seems like it would have false
# positives, where one string reordered its arguments.
# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \\
# $(filter-out $(cmd_$@), $(cmd_$(1))))
# We instead substitute each for the empty string into the other, and
# say they're equal if both substitutions produce the empty string.
# .d files contain """ + SPACE_REPLACEMENT + \
""" instead of spaces, take that into account.
command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\\
$(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1))))
# Helper that is non-empty when a prerequisite changes.
# Normally make does this implicitly, but we force rules to always run
# so we can check their command lines.
# $? -- new prerequisites
# $| -- order-only dependencies
prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?))
# Helper that executes all postbuilds, and deletes the output file when done
# if any of the postbuilds failed.
define do_postbuilds
@E=0;\\
for p in $(POSTBUILDS); do\\
eval $$p;\\
F=$$?;\\
if [ $$F -ne 0 ]; then\\
E=$$F;\\
fi;\\
done;\\
if [ $$E -ne 0 ]; then\\
rm -rf "$@";\\
exit $$E;\\
fi
endef
# do_cmd: run a command via the above cmd_foo names, if necessary.
# Should always run for a given target to handle command-line changes.
# Second argument, if non-zero, makes it do asm/C/C++ dependency munging.
# Third argument, if non-zero, makes it do POSTBUILDS processing.
# Note: We intentionally do NOT call dirx for depfile, since it contains """ + \
SPACE_REPLACEMENT + """ for
# spaces already and dirx strips the """ + SPACE_REPLACEMENT + \
""" characters.
define do_cmd
$(if $(or $(command_changed),$(prereq_changed)),
@$(call exact_echo, $($(quiet)cmd_$(1)))
@mkdir -p "$(call dirx,$@)" "$(dir $(depfile))"
$(if $(findstring flock,$(word %(flock_index)d,$(cmd_$1))),
@$(cmd_$(1))
@echo " $(quiet_cmd_$(1)): Finished",
@$(cmd_$(1))
)
@$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile)
@$(if $(2),$(fixup_dep))
$(if $(and $(3), $(POSTBUILDS)),
$(call do_postbuilds)
)
)
endef
# Declare the "%(default_target)s" target first so it is the default,
# even though we don't have the deps yet.
.PHONY: %(default_target)s
%(default_target)s:
# make looks for ways to re-generate included makefiles, but in our case, we
# don't have a direct way. Explicitly telling make that it has nothing to do
# for them makes it go faster.
%%.d: ;
# Use FORCE_DO_CMD to force a target to run. Should be coupled with
# do_cmd.
.PHONY: FORCE_DO_CMD
FORCE_DO_CMD:
""")
SHARED_HEADER_MAC_COMMANDS = """
quiet_cmd_objc = CXX($(TOOLSET)) $@
cmd_objc = $(CC.$(TOOLSET)) $(GYP_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_objcxx = CXX($(TOOLSET)) $@
cmd_objcxx = $(CXX.$(TOOLSET)) $(GYP_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# Commands for precompiled header files.
quiet_cmd_pch_c = CXX($(TOOLSET)) $@
cmd_pch_c = $(CC.$(TOOLSET)) $(GYP_PCH_CFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_cc = CXX($(TOOLSET)) $@
cmd_pch_cc = $(CC.$(TOOLSET)) $(GYP_PCH_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_m = CXX($(TOOLSET)) $@
cmd_pch_m = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_pch_mm = CXX($(TOOLSET)) $@
cmd_pch_mm = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# gyp-mac-tool is written next to the root Makefile by gyp.
# Use $(4) for the command, since $(2) and $(3) are used as flag by do_cmd
# already.
quiet_cmd_mac_tool = MACTOOL $(4) $<
cmd_mac_tool = ./gyp-mac-tool $(4) $< "$@"
quiet_cmd_mac_package_framework = PACKAGE FRAMEWORK $@
cmd_mac_package_framework = ./gyp-mac-tool package-framework "$@" $(4)
quiet_cmd_infoplist = INFOPLIST $@
cmd_infoplist = $(CC.$(TOOLSET)) -E -P -Wno-trigraphs -x c $(INFOPLIST_DEFINES) "$<" -o "$@"
"""
# Extra command definitions spliced into %(extra_commands)s on Solaris.
SHARED_HEADER_SUN_COMMANDS = """
# gyp-sun-tool is written next to the root Makefile by gyp.
# Use $(4) for the command, since $(2) and $(3) are used as flag by do_cmd
# already.
quiet_cmd_sun_tool = SUNTOOL $(4) $<
cmd_sun_tool = ./gyp-sun-tool $(4) $< "$@"
"""
def WriteRootHeaderSuffixRules(writer):
  """Emit the three families of suffix rules into the root Makefile.

  For every compilable extension this writes a pattern rule compiling from
  the source dir, from generated sources in $(obj).$(TOOLSET), and from
  generated sources in $(obj), each followed by its do_cmd invocation.
  """
  extensions = sorted(COMPILABLE_EXTENSIONS.keys(), key=str.lower)

  def emit_rules(rule_template):
    # One rule line plus the shared do_cmd recipe per extension.
    for ext in extensions:
      writer.write(rule_template % ext)
      writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])

  writer.write('# Suffix rules, putting all outputs into $(obj).\n')
  emit_rules('$(obj).$(TOOLSET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD\n')

  writer.write('\n# Try building from generated source, too.\n')
  emit_rules('$(obj).$(TOOLSET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD\n')
  writer.write('\n')
  emit_rules('$(obj).$(TOOLSET)/%%.o: $(obj)/%%%s FORCE_DO_CMD\n')
  writer.write('\n')
# Banner comments written before the per-target suffix-rule sections
# (see MakefileWriter.Write).
SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\
# Suffix rules, putting all outputs into $(obj).
""")

SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\
# Try building from generated source, too.
""")
SHARED_FOOTER = """\
# "all" is a concatenation of the "all" targets from all the included
# sub-makefiles. This is just here to clarify.
all:
# Add in dependency-tracking rules. $(all_deps) is the list of every single
# target in our tree. Only consider the ones with .d (dependency) info:
d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d))
ifneq ($(d_files),)
include $(d_files)
endif
"""
header = """\
# This file is generated by gyp; do not edit.
"""
# Maps every compilable file extension to the do_cmd that compiles it.
# (CalculateVariables() adds '.m'/'.mm' on mac.)
COMPILABLE_EXTENSIONS = {
  '.c': 'cc',
  '.cc': 'cxx',
  '.cpp': 'cxx',
  '.cxx': 'cxx',
  '.s': 'cc',
  '.S': 'cc',
}


def Compilable(filename):
  """Return true if the file is compilable (should be in OBJS)."""
  # str.endswith accepts a tuple of suffixes; this replaces the manual
  # per-extension scan with a single C-level check.  The tuple is rebuilt
  # per call so extensions registered later (e.g. '.m' on mac) are honored.
  return filename.endswith(tuple(COMPILABLE_EXTENSIONS))
def Linkable(filename):
  """Return true if the file is linkable (should be on the link line).

  In this generator only object files (.o) qualify.
  """
  return filename.endswith('.o')
def Target(filename):
  """Translate a compilable filename to its .o target."""
  root, _ext = os.path.splitext(filename)
  return root + '.o'
def EscapeShellArgument(s):
  """Quote *s* so that a POSIX shell interprets it literally.

  Wraps the string in single quotes, closing and reopening the quoting
  around every embedded single quote.  Taken from
  http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
  """
  return "'%s'" % s.replace("'", "'\\''")


def EscapeMakeVariableExpansion(s):
  """Escape '$' so make treats the string literally instead of expanding it."""
  return s.replace('$', '$$')


def EscapeCppDefine(s):
  """Escapes a CPP define so that it will reach the compiler unaltered."""
  # Shell-quote first, then neutralize make's '$' expansion.
  escaped = EscapeMakeVariableExpansion(EscapeShellArgument(s))
  # '#' characters must be escaped even embedded in a string, else Make will
  # treat it as the start of a comment.
  return escaped.replace('#', r'\#')
def QuoteIfNecessary(string):
  """Wrap *string* in double quotes (escaping embedded ones) when it contains
  a double quote; otherwise return it unchanged.

  TODO: Should this ideally be replaced with one or more of the escape
  functions above?
  """
  if '"' not in string:
    return string
  return '"%s"' % string.replace('"', '\\"')
def StringToMakefileVariable(string):
  """Convert a string to a value that is acceptable as a make variable name."""
  # Collapse every character outside [a-zA-Z0-9_] to an underscore, since
  # make variable names cannot contain most punctuation or whitespace.
  return re.sub(r'[^a-zA-Z0-9_]', '_', string)
# Prefix prepended to relative paths so they resolve against the source
# directory; filled in during top-level Makefile generation.
srcdir_prefix = ''


def Sourceify(path):
  """Convert a path to its source directory form."""
  # Make-variable references and absolute paths are already unambiguous,
  # so only plain relative paths get the srcdir prefix.
  if '$(' in path or os.path.isabs(path):
    return path
  return srcdir_prefix + path
def QuoteSpaces(s, quote=r'\ '):
  """Return *s* with every space replaced by *quote* (default: escaped space,
  as needed in makefile file lists)."""
  return quote.join(s.split(' '))
def InvertRelativePath(path):
  """Given a relative path like foo/bar, return the inverse relative path:
  the path from the relative path back to the origin dir.

  E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
  should always produce the empty string.
  """
  if not path:
    return path
  # Only need to handle relative paths into subdirectories for now.
  assert '..' not in path, path
  # One '..' per path component climbs back to the origin.
  components = path.split(os.path.sep)
  return os.path.sep.join('..' for _ in components)
# Map from qualified target to path to output.
# Populated by MakefileWriter.Write() as each target's .mk is generated.
target_outputs = {}

# Map from qualified target to any linkable output.  A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class MakefileWriter:
"""MakefileWriter packages up the writing of one target-specific foobar.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
  def __init__(self, generator_flags, flavor):
    # Per-generator options (e.g. android_ndk_version) and target flavor
    # ('mac', 'linux', ...), consulted throughout Write().
    self.generator_flags = generator_flags
    self.flavor = flavor

    # ext -> make-rule text; filled below and emitted per target in Write().
    self.suffix_rules_srcdir = {}
    self.suffix_rules_objdir1 = {}
    self.suffix_rules_objdir2 = {}

    # Generate suffix rules for all compilable extensions.
    for ext in COMPILABLE_EXTENSIONS.keys():
      # Suffix rules for source folder.
      self.suffix_rules_srcdir.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD
	@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})

      # Suffix rules for generated source files.
      self.suffix_rules_objdir1.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD
	@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
      self.suffix_rules_objdir2.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj)/%%%s FORCE_DO_CMD
	@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
  def Write(self, qualified_target, base_path, output_filename, spec, configs,
            part_of_all):
    """The main entry point: writes a .mk file for a single target.

    Arguments:
      qualified_target: target we're generating
      base_path: path relative to source root we're building in, used to
                 resolve target-relative paths
      output_filename: output .mk file name to write
      spec, configs: gyp info
      part_of_all: flag indicating this target is part of 'all'
    """
    ensure_directory_exists(output_filename)

    self.fp = open(output_filename, 'w')

    self.fp.write(header)

    self.qualified_target = qualified_target
    self.path = base_path
    self.target = spec['target_name']
    self.type = spec['type']
    self.toolset = spec['toolset']

    self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
    if self.flavor == 'mac':
      self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
    else:
      self.xcode_settings = None

    deps, link_deps = self.ComputeDeps(spec)

    # Some of the generation below can add extra output, sources, or
    # link dependencies. All of the out params of the functions that
    # follow use names like extra_foo.
    extra_outputs = []
    extra_sources = []
    extra_link_deps = []
    extra_mac_bundle_resources = []
    mac_bundle_deps = []

    if self.is_mac_bundle:
      self.output = self.ComputeMacBundleOutput(spec)
      self.output_binary = self.ComputeMacBundleBinaryOutput(spec)
    else:
      self.output = self.output_binary = self.ComputeOutput(spec)

    # Target types whose output gets an install alias in the build dir.
    self._INSTALLABLE_TARGETS = ('executable', 'loadable_module',
                                 'shared_library')
    if self.type in self._INSTALLABLE_TARGETS:
      self.alias = os.path.basename(self.output)
      install_path = self._InstallableTargetInstallPath()
    else:
      self.alias = self.output
      install_path = self.output

    self.WriteLn("TOOLSET := " + self.toolset)
    self.WriteLn("TARGET := " + self.target)

    # Actions must come first, since they can generate more OBJs for use below.
    if 'actions' in spec:
      self.WriteActions(spec['actions'], extra_sources, extra_outputs,
                        extra_mac_bundle_resources, part_of_all)

    # Rules must be early like actions.
    if 'rules' in spec:
      self.WriteRules(spec['rules'], extra_sources, extra_outputs,
                      extra_mac_bundle_resources, part_of_all)

    if 'copies' in spec:
      self.WriteCopies(spec['copies'], extra_outputs, part_of_all)

    # Bundle resources.
    if self.is_mac_bundle:
      all_mac_bundle_resources = (
          spec.get('mac_bundle_resources', []) + extra_mac_bundle_resources)
      self.WriteMacBundleResources(all_mac_bundle_resources, mac_bundle_deps)
      self.WriteMacInfoPlist(mac_bundle_deps)

    # Sources.
    all_sources = spec.get('sources', []) + extra_sources
    if all_sources:
      self.WriteSources(
          configs, deps, all_sources, extra_outputs,
          extra_link_deps, part_of_all,
          gyp.xcode_emulation.MacPrefixHeader(
              self.xcode_settings, lambda p: Sourceify(self.Absolutify(p)),
              self.Pchify))
      sources = filter(Compilable, all_sources)
      if sources:
        self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1)
        # Emit suffix rules only for the extensions this target actually uses.
        extensions = set([os.path.splitext(s)[1] for s in sources])
        for ext in extensions:
          if ext in self.suffix_rules_srcdir:
            self.WriteLn(self.suffix_rules_srcdir[ext])
        self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2)
        for ext in extensions:
          if ext in self.suffix_rules_objdir1:
            self.WriteLn(self.suffix_rules_objdir1[ext])
        for ext in extensions:
          if ext in self.suffix_rules_objdir2:
            self.WriteLn(self.suffix_rules_objdir2[ext])
        self.WriteLn('# End of this set of suffix rules')

    # Add dependency from bundle to bundle binary.
    if self.is_mac_bundle:
      mac_bundle_deps.append(self.output_binary)

    self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps,
                     mac_bundle_deps, extra_outputs, part_of_all)

    # Update global list of target outputs, used in dependency tracking.
    target_outputs[qualified_target] = install_path

    # Update global list of link dependencies.
    if self.type in ('static_library', 'shared_library'):
      target_link_deps[qualified_target] = self.output_binary

    # Currently any versions have the same effect, but in future the behavior
    # could be different.
    if self.generator_flags.get('android_ndk_version', None):
      self.WriteAndroidNdkModuleRule(self.target, all_sources, link_deps)

    self.fp.close()
def WriteSubMake(self, output_filename, makefile_path, targets, build_dir):
"""Write a "sub-project" Makefile.
This is a small, wrapper Makefile that calls the top-level Makefile to build
the targets from a single gyp file (i.e. a sub-project).
Arguments:
output_filename: sub-project Makefile name to write
makefile_path: path to the top-level Makefile
targets: list of "all" targets for this sub-project
build_dir: build output directory, relative to the sub-project
"""
ensure_directory_exists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
# For consistency with other builders, put sub-project build output in the
# sub-project dir (see test/subdirectory/gyptest-subdir-all.py).
self.WriteLn('export builddir_name ?= %s' %
os.path.join(os.path.dirname(output_filename), build_dir))
self.WriteLn('.PHONY: all')
self.WriteLn('all:')
if makefile_path:
makefile_path = ' -C ' + makefile_path
self.WriteLn('\t$(MAKE)%s %s' % (makefile_path, ' '.join(targets)))
self.fp.close()
  def WriteActions(self, actions, extra_sources, extra_outputs,
                   extra_mac_bundle_resources, part_of_all):
    """Write Makefile code for any 'actions' from the gyp input.

    extra_sources: a list that will be filled in with newly generated source
                   files, if any
    extra_outputs: a list that will be filled in with any outputs of these
                   actions (used to make other pieces dependent on these
                   actions)
    part_of_all: flag indicating this target is part of 'all'
    """
    env = self.GetSortedXcodeEnv()
    for action in actions:
      # Unique make-variable-safe name for this action's cmd_/quiet_cmd_ pair.
      name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
                                                 action['action_name']))
      self.WriteLn('### Rules for action "%s":' % action['action_name'])
      inputs = action['inputs']
      outputs = action['outputs']

      # Build up a list of outputs.
      # Collect the output dirs we'll need.
      dirs = set()
      for out in outputs:
        dir = os.path.split(out)[0]
        if dir:
          dirs.add(dir)
      if int(action.get('process_outputs_as_sources', False)):
        extra_sources += outputs
      if int(action.get('process_outputs_as_mac_bundle_resources', False)):
        extra_mac_bundle_resources += outputs

      # Write the actual command.
      action_commands = action['action']
      if self.flavor == 'mac':
        action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env)
                          for command in action_commands]
      command = gyp.common.EncodePOSIXShellList(action_commands)
      if 'message' in action:
        self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message']))
      else:
        self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name))
      if len(dirs) > 0:
        # Create all needed output directories before the action runs.
        command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command

      cd_action = 'cd %s; ' % Sourceify(self.path or '.')

      # command and cd_action get written to a toplevel variable called
      # cmd_foo. Toplevel variables can't handle things that change per
      # makefile like $(TARGET), so hardcode the target.
      command = command.replace('$(TARGET)', self.target)
      cd_action = cd_action.replace('$(TARGET)', self.target)

      # Set LD_LIBRARY_PATH in case the action runs an executable from this
      # build which links to shared libs from this build.
      # actions run on the host, so they should in theory only use host
      # libraries, but until everything is made cross-compile safe, also use
      # target libraries.
      # TODO(piman): when everything is cross-compile safe, remove lib.target
      self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:'
                   '$(builddir)/lib.target:$$LD_LIBRARY_PATH; '
                   'export LD_LIBRARY_PATH; '
                   '%s%s'
                   % (name, cd_action, command))
      self.WriteLn()
      outputs = map(self.Absolutify, outputs)

      # The makefile rules are all relative to the top dir, but the gyp actions
      # are defined relative to their containing dir. This replaces the obj
      # variable for the action rule with an absolute version so that the
      # output goes in the right place.
      # Only write the 'obj' and 'builddir' rules for the "primary" output
      # (:1); it's superfluous for the "extra outputs", and this avoids
      # accidentally writing duplicate dummy rules for those outputs.
      # Same for environment.
      self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0]))
      self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0]))
      self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv())

      for input in inputs:
        assert ' ' not in input, (
            "Spaces in action input filenames not supported (%s)" % input)
      for output in outputs:
        assert ' ' not in output, (
            "Spaces in action output filenames not supported (%s)" % output)

      # See the comment in WriteCopies about expanding env vars.
      outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
      inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]

      self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)),
                      part_of_all=part_of_all, command=name)

      # Stuff the outputs in a variable so we can refer to them later.
      outputs_variable = 'action_%s_outputs' % name
      self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs)))
      extra_outputs.append('$(%s)' % outputs_variable)
      self.WriteLn()

    self.WriteLn()
  def WriteRules(self, rules, extra_sources, extra_outputs,
                 extra_mac_bundle_resources, part_of_all):
    """Write Makefile code for any 'rules' from the gyp input.

    extra_sources: a list that will be filled in with newly generated source
                   files, if any
    extra_outputs: a list that will be filled in with any outputs of these
                   rules (used to make other pieces dependent on these rules)
    part_of_all: flag indicating this target is part of 'all'
    """
    env = self.GetSortedXcodeEnv()
    for rule in rules:
      name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
                                                 rule['rule_name']))
      # Each rule source gets its own numbered cmd_<name>_<count> variable.
      count = 0
      self.WriteLn('### Generated for rule %s:' % name)

      all_outputs = []

      for rule_source in rule.get('rule_sources', []):
        dirs = set()
        (rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
        (rule_source_root, rule_source_ext) = \
            os.path.splitext(rule_source_basename)

        # Expand RULE_INPUT_* placeholders in the declared outputs.
        outputs = [self.ExpandInputRoot(out, rule_source_root,
                                        rule_source_dirname)
                   for out in rule['outputs']]
        for out in outputs:
          dir = os.path.dirname(out)
          if dir:
            dirs.add(dir)
        if int(rule.get('process_outputs_as_sources', False)):
          extra_sources += outputs
        if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
          extra_mac_bundle_resources += outputs
        inputs = map(Sourceify, map(self.Absolutify, [rule_source] +
                                    rule.get('inputs', [])))
        actions = ['$(call do_cmd,%s_%d)' % (name, count)]

        if name == 'resources_grit':
          # HACK: This is ugly.  Grit intentionally doesn't touch the
          # timestamp of its output file when the file doesn't change,
          # which is fine in hash-based dependency systems like scons
          # and forge, but not kosher in the make world.  After some
          # discussion, hacking around it here seems like the least
          # amount of pain.
          actions += ['@touch --no-create $@']

        # See the comment in WriteCopies about expanding env vars.
        outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
        inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]

        outputs = map(self.Absolutify, outputs)
        all_outputs += outputs
        # Only write the 'obj' and 'builddir' rules for the "primary" output
        # (:1); it's superfluous for the "extra outputs", and this avoids
        # accidentally writing duplicate dummy rules for those outputs.
        self.WriteLn('%s: obj := $(abs_obj)' % outputs[0])
        self.WriteLn('%s: builddir := $(abs_builddir)' % outputs[0])
        self.WriteMakeRule(outputs, inputs + ['FORCE_DO_CMD'], actions)
        for output in outputs:
          assert ' ' not in output, (
              "Spaces in rule filenames not yet supported (%s)" % output)
        self.WriteLn('all_deps += %s' % ' '.join(outputs))

        action = [self.ExpandInputRoot(ac, rule_source_root,
                                       rule_source_dirname)
                  for ac in rule['action']]
        mkdirs = ''
        if len(dirs) > 0:
          mkdirs = 'mkdir -p %s; ' % ' '.join(dirs)
        cd_action = 'cd %s; ' % Sourceify(self.path or '.')

        # action, cd_action, and mkdirs get written to a toplevel variable
        # called cmd_foo. Toplevel variables can't handle things that change
        # per makefile like $(TARGET), so hardcode the target.
        if self.flavor == 'mac':
          action = [gyp.xcode_emulation.ExpandEnvVars(command, env)
                    for command in action]
        action = gyp.common.EncodePOSIXShellList(action)
        action = action.replace('$(TARGET)', self.target)
        cd_action = cd_action.replace('$(TARGET)', self.target)
        mkdirs = mkdirs.replace('$(TARGET)', self.target)

        # Set LD_LIBRARY_PATH in case the rule runs an executable from this
        # build which links to shared libs from this build.
        # rules run on the host, so they should in theory only use host
        # libraries, but until everything is made cross-compile safe, also use
        # target libraries.
        # TODO(piman): when everything is cross-compile safe, remove lib.target
        self.WriteLn(
            "cmd_%(name)s_%(count)d = LD_LIBRARY_PATH="
            "$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; "
            "export LD_LIBRARY_PATH; "
            "%(cd_action)s%(mkdirs)s%(action)s" % {
              'action': action,
              'cd_action': cd_action,
              'count': count,
              'mkdirs': mkdirs,
              'name': name,
            })
        self.WriteLn(
            'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % {
              'count': count,
              'name': name,
            })
        self.WriteLn()
        count += 1

      outputs_variable = 'rule_%s_outputs' % name
      self.WriteList(all_outputs, outputs_variable)
      extra_outputs.append('$(%s)' % outputs_variable)

      self.WriteLn('### Finished generating for rule: %s' % name)
      self.WriteLn()
    self.WriteLn('### Finished generating for all rules')
    self.WriteLn('')
def WriteCopies(self, copies, extra_outputs, part_of_all):
  """Write Makefile code for any 'copies' from the gyp input.

  copies: the 'copies' list from the gyp spec; each entry has a
          'destination' and a list of 'files'.
  extra_outputs: a list that will be filled in with any outputs of this action
                 (used to make other pieces dependent on this action)
  part_of_all: flag indicating this target is part of 'all'
  """
  self.WriteLn('### Generated for copy rule.')

  # One make variable collects every copied output for this target.
  variable = StringToMakefileVariable(self.qualified_target + '_copies')
  outputs = []
  for copy in copies:
    for path in copy['files']:
      # Absolutify() may call normpath, and will strip trailing slashes.
      path = Sourceify(self.Absolutify(path))
      filename = os.path.split(path)[1]
      output = Sourceify(self.Absolutify(os.path.join(copy['destination'],
                                                      filename)))

      # If the output path has variables in it, which happens in practice for
      # 'copies', writing the environment as target-local doesn't work,
      # because the variables are already needed for the target name.
      # Copying the environment variables into global make variables doesn't
      # work either, because then the .d files will potentially contain spaces
      # after variable expansion, and .d file handling cannot handle spaces.
      # As a workaround, manually expand variables at gyp time. Since 'copies'
      # can't run scripts, there's no need to write the env then.
      # WriteDoCmd() will escape spaces for .d files.
      env = self.GetSortedXcodeEnv()
      output = gyp.xcode_emulation.ExpandEnvVars(output, env)
      path = gyp.xcode_emulation.ExpandEnvVars(path, env)
      self.WriteDoCmd([output], [path], 'copy', part_of_all)
      outputs.append(output)
  # Publish the outputs under one variable so dependents can reference them.
  self.WriteLn('%s = %s' % (variable, ' '.join(map(QuoteSpaces, outputs))))
  extra_outputs.append('$(%s)' % variable)
  self.WriteLn()
def WriteMacBundleResources(self, resources, bundle_deps):
  """Writes Makefile code for 'mac_bundle_resources'.

  resources: list of resource files from the gyp spec.
  bundle_deps: list that is extended with each copied resource output, so the
               bundle target can depend on them.
  """
  self.WriteLn('### Generated for mac_bundle_resources')

  # GetMacBundleResources yields (output-in-bundle, source) pairs; each is
  # copied via the mac_tool 'copy-bundle-resource' command.
  for output, res in gyp.xcode_emulation.GetMacBundleResources(
      generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
      map(Sourceify, map(self.Absolutify, resources))):
    self.WriteDoCmd([output], [res], 'mac_tool,,,copy-bundle-resource',
                    part_of_all=True)
    bundle_deps.append(output)
def WriteMacInfoPlist(self, bundle_deps):
  """Write Makefile code for bundle Info.plist files.

  bundle_deps: list that is extended with the generated plist output, so the
               bundle target can depend on it.
  """
  info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
      generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
      lambda p: Sourceify(self.Absolutify(p)))
  # No INFOPLIST_FILE configured for this target: nothing to do.
  if not info_plist:
    return
  if defines:
    # The plist must be run through the C preprocessor first.
    # Create an intermediate file to store preprocessed results.
    intermediate_plist = ('$(obj).$(TOOLSET)/$(TARGET)/' +
        os.path.basename(info_plist))
    self.WriteList(defines, intermediate_plist + ': INFOPLIST_DEFINES', '-D',
        quoter=EscapeCppDefine)
    self.WriteMakeRule([intermediate_plist], [info_plist],
        ['$(call do_cmd,infoplist)',
         # "Convert" the plist so that any weird whitespace changes from the
         # preprocessor do not affect the XML parser in mac_tool.
         '@plutil -convert xml1 $@ $@'])
    info_plist = intermediate_plist
  # plists can contain envvars and substitute them into the file.
  self.WriteSortedXcodeEnv(
      out, self.GetSortedXcodeEnv(additional_settings=extra_env))
  self.WriteDoCmd([out], [info_plist], 'mac_tool,,,copy-info-plist',
                  part_of_all=True)
  bundle_deps.append(out)
def WriteSources(self, configs, deps, sources,
                 extra_outputs, extra_link_deps,
                 part_of_all, precompiled_header):
  """Write Makefile code for any 'sources' from the gyp input.
  These are source files necessary to build the current target.

  configs, deps, sources: input from gyp.
  extra_outputs: a list of extra outputs this action should be dependent on;
                 used to serialize action/rules before compilation
  extra_link_deps: a list that will be filled in with any outputs of
                   compilation (to be used in link lines)
  part_of_all: flag indicating this target is part of 'all'
  precompiled_header: helper object providing pch includes/dependencies
  """

  # Write configuration-specific variables for CFLAGS, etc.
  for configname in sorted(configs.keys()):
    config = configs[configname]
    self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D',
        quoter=EscapeCppDefine)

    if self.flavor == 'mac':
      # On mac, flags come from the xcode settings emulation.
      cflags = self.xcode_settings.GetCflags(configname)
      cflags_c = self.xcode_settings.GetCflagsC(configname)
      cflags_cc = self.xcode_settings.GetCflagsCC(configname)
      cflags_objc = self.xcode_settings.GetCflagsObjC(configname)
      cflags_objcc = self.xcode_settings.GetCflagsObjCC(configname)
    else:
      # Elsewhere, flags come straight from the gyp config.  Note that
      # cflags_objc/cflags_objcc are only bound on the mac branch; they are
      # only referenced below under the same flavor check.
      cflags = config.get('cflags')
      cflags_c = config.get('cflags_c')
      cflags_cc = config.get('cflags_cc')

    self.WriteLn("# Flags passed to all source files.");
    self.WriteList(cflags, 'CFLAGS_%s' % configname)
    self.WriteLn("# Flags passed to only C files.");
    self.WriteList(cflags_c, 'CFLAGS_C_%s' % configname)
    self.WriteLn("# Flags passed to only C++ files.");
    self.WriteList(cflags_cc, 'CFLAGS_CC_%s' % configname)
    if self.flavor == 'mac':
      self.WriteLn("# Flags passed to only ObjC files.");
      self.WriteList(cflags_objc, 'CFLAGS_OBJC_%s' % configname)
      self.WriteLn("# Flags passed to only ObjC++ files.");
      self.WriteList(cflags_objcc, 'CFLAGS_OBJCC_%s' % configname)
    includes = config.get('include_dirs')
    if includes:
      includes = map(Sourceify, map(self.Absolutify, includes))
    self.WriteList(includes, 'INCS_%s' % configname, prefix='-I')

  # NOTE: this file targets Python 2 -- map()/filter() here are assumed to
  # return lists, not iterators.
  compilable = filter(Compilable, sources)
  objs = map(self.Objectify, map(self.Absolutify, map(Target, compilable)))
  self.WriteList(objs, 'OBJS')

  for obj in objs:
    assert ' ' not in obj, (
        "Spaces in object filenames not supported (%s)" % obj)
  self.WriteLn('# Add to the list of files we specially track '
               'dependencies for.')
  self.WriteLn('all_deps += $(OBJS)')
  self.WriteLn()

  # Make sure our dependencies are built first.
  if deps:
    self.WriteMakeRule(['$(OBJS)'], deps,
                       comment = 'Make sure our dependencies are built '
                                 'before any of us.',
                       order_only = True)

  # Make sure the actions and rules run first.
  # If they generate any extra headers etc., the per-.o file dep tracking
  # will catch the proper rebuilds, so order only is still ok here.
  if extra_outputs:
    self.WriteMakeRule(['$(OBJS)'], extra_outputs,
                       comment = 'Make sure our actions/rules run '
                                 'before any of us.',
                       order_only = True)

  pchdeps = precompiled_header.GetObjDependencies(compilable, objs )
  if pchdeps:
    self.WriteLn('# Dependencies from obj files to their precompiled headers')
    for source, obj, gch in pchdeps:
      self.WriteLn('%s: %s' % (obj, gch))
    self.WriteLn('# End precompiled header dependencies')

  if objs:
    extra_link_deps.append('$(OBJS)')
    self.WriteLn("""\
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.""")
    self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)")
    self.WriteLn("$(OBJS): GYP_CFLAGS := "
                 "$(DEFS_$(BUILDTYPE)) "
                 "$(INCS_$(BUILDTYPE)) "
                 "%s " % precompiled_header.GetInclude('c') +
                 "$(CFLAGS_$(BUILDTYPE)) "
                 "$(CFLAGS_C_$(BUILDTYPE))")
    self.WriteLn("$(OBJS): GYP_CXXFLAGS := "
                 "$(DEFS_$(BUILDTYPE)) "
                 "$(INCS_$(BUILDTYPE)) "
                 "%s " % precompiled_header.GetInclude('cc') +
                 "$(CFLAGS_$(BUILDTYPE)) "
                 "$(CFLAGS_CC_$(BUILDTYPE))")
    if self.flavor == 'mac':
      self.WriteLn("$(OBJS): GYP_OBJCFLAGS := "
                   "$(DEFS_$(BUILDTYPE)) "
                   "$(INCS_$(BUILDTYPE)) "
                   "%s " % precompiled_header.GetInclude('m') +
                   "$(CFLAGS_$(BUILDTYPE)) "
                   "$(CFLAGS_C_$(BUILDTYPE)) "
                   "$(CFLAGS_OBJC_$(BUILDTYPE))")
      self.WriteLn("$(OBJS): GYP_OBJCXXFLAGS := "
                   "$(DEFS_$(BUILDTYPE)) "
                   "$(INCS_$(BUILDTYPE)) "
                   "%s " % precompiled_header.GetInclude('mm') +
                   "$(CFLAGS_$(BUILDTYPE)) "
                   "$(CFLAGS_CC_$(BUILDTYPE)) "
                   "$(CFLAGS_OBJCC_$(BUILDTYPE))")

  self.WritePchTargets(precompiled_header.GetPchBuildCommands())

  # If there are any object files in our input file list, link them into our
  # output.
  extra_link_deps += filter(Linkable, sources)

  self.WriteLn()
def WritePchTargets(self, pch_commands):
  """Writes make rules to compile prefix headers.

  pch_commands: list of (gch_output, lang_flag, lang, input_header) tuples,
                where lang is one of 'c', 'cc', 'm', 'mm'.
  """
  if not pch_commands:
    return

  for gch, lang_flag, lang, input in pch_commands:
    # Per-language extra flags appended after the common flag variables.
    extra_flags = {
      'c': '$(CFLAGS_C_$(BUILDTYPE))',
      'cc': '$(CFLAGS_CC_$(BUILDTYPE))',
      'm': '$(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))',
      'mm': '$(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))',
    }[lang]
    # Target-local variable name the pch compile command reads.
    var_name = {
      'c': 'GYP_PCH_CFLAGS',
      'cc': 'GYP_PCH_CXXFLAGS',
      'm': 'GYP_PCH_OBJCFLAGS',
      'mm': 'GYP_PCH_OBJCXXFLAGS',
    }[lang]
    self.WriteLn("%s: %s := %s " % (gch, var_name, lang_flag) +
                 "$(DEFS_$(BUILDTYPE)) "
                 "$(INCS_$(BUILDTYPE)) "
                 "$(CFLAGS_$(BUILDTYPE)) " +
                 extra_flags)

    self.WriteLn('%s: %s FORCE_DO_CMD' % (gch, input))
    self.WriteLn('\t@$(call do_cmd,pch_%s,1)' % lang)
    self.WriteLn('')
    # Spaces would break all_deps handling (see WriteDoCmd).
    assert ' ' not in gch, (
        "Spaces in gch filenames not supported (%s)" % gch)
    self.WriteLn('all_deps += %s' % gch)
    self.WriteLn('')
def ComputeOutputBasename(self, spec):
  """Return the 'output basename' of a gyp spec.

  E.g., the loadable module 'foobar' in directory 'baz' will produce
  'libfoobar.so'
  """
  assert not self.is_mac_bundle

  # On mac the xcode settings decide the executable name outright.
  if self.flavor == 'mac' and self.type in (
      'static_library', 'executable', 'shared_library', 'loadable_module'):
    return self.xcode_settings.GetExecutablePath()

  target = spec['target_name']
  target_prefix = ''
  target_ext = ''

  if self.type == 'static_library':
    # Libraries are named lib<name>.a; avoid doubling a 'lib' prefix.
    if target.startswith('lib'):
      target = target[3:]
    target_prefix = 'lib'
    target_ext = '.a'
  elif self.type in ('loadable_module', 'shared_library'):
    if target.startswith('lib'):
      target = target[3:]
    target_prefix = 'lib'
    target_ext = '.so'
  elif self.type == 'none':
    # 'none' targets get a stamp file so make has something to touch.
    target = '%s.stamp' % target
  elif self.type != 'executable':
    print ("ERROR: What output file should be generated?",
           "type", self.type, "target", target)

  # Explicit spec settings override the computed defaults.
  target_prefix = spec.get('product_prefix', target_prefix)
  target = spec.get('product_name', target)
  product_ext = spec.get('product_extension')
  if product_ext:
    target_ext = '.' + product_ext

  return target_prefix + target + target_ext
def _InstallImmediately(self):
return self.toolset == 'target' and self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module')
def ComputeOutput(self, spec):
  """Return the 'output' (full output path) of a gyp spec.

  E.g., the loadable module 'foobar' in directory 'baz' will produce
  '$(obj)/baz/libfoobar.so'
  """
  assert not self.is_mac_bundle

  # Default location: the per-toolset object directory, mirroring the
  # source path.  Executables (and mac immediate-install products) go to
  # the build directory instead.
  out_dir = os.path.join('$(obj).' + self.toolset, self.path)
  if self.type == 'executable' or self._InstallImmediately():
    out_dir = '$(builddir)'
  # An explicit product_dir in the spec always wins.
  out_dir = spec.get('product_dir', out_dir)

  return os.path.join(out_dir, self.ComputeOutputBasename(spec))
def ComputeMacBundleOutput(self, spec):
  """Return the 'output' (full output path) to a bundle output directory."""
  assert self.is_mac_bundle
  product_dir = generator_default_variables['PRODUCT_DIR']
  wrapper_name = self.xcode_settings.GetWrapperName()
  return os.path.join(product_dir, wrapper_name)
def ComputeMacBundleBinaryOutput(self, spec):
  """Return the 'output' (full output path) to the binary in a bundle."""
  product_dir = generator_default_variables['PRODUCT_DIR']
  return os.path.join(product_dir, self.xcode_settings.GetExecutablePath())
def ComputeDeps(self, spec):
  """Compute the dependencies of a gyp spec.

  Returns a tuple (deps, link_deps), where each is a list of
  filenames that will need to be put in front of make for either
  building (deps) or linking (link_deps).
  """
  deps = []
  link_deps = []
  # Build deps: every dependency's recorded output (skipping empty ones).
  for dep in spec.get('dependencies', ()):
    dep_output = target_outputs[dep]
    if dep_output:
      deps.append(dep_output)
  # Link deps: only dependencies that registered a linkable output.
  for dep in spec.get('dependencies', ()):
    if dep in target_link_deps:
      link_deps.append(target_link_deps[dep])
  # Anything we link against must also be built first.
  deps.extend(link_deps)
  # TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?
  # This hack makes it work:
  # link_deps.extend(spec.get('libraries', []))
  return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteDependencyOnExtraOutputs(self, target, extra_outputs):
  """Make this target's binary order-only depend on extra_outputs.

  NOTE(review): the 'target' parameter is unused; the rule is always written
  against self.output_binary even though one caller passes self.output.
  Presumably intentional (actions must run before the binary links), but
  confirm before relying on 'target'.
  """
  self.WriteMakeRule([self.output_binary], extra_outputs,
                     comment = 'Build our special outputs first.',
                     order_only = True)
def WriteTarget(self, spec, configs, deps, link_deps, bundle_deps,
                extra_outputs, part_of_all):
  """Write Makefile code to produce the final target of the gyp spec.

  spec, configs: input from gyp.
  deps, link_deps: dependency lists; see ComputeDeps()
  bundle_deps: outputs a mac bundle depends on (resources, plist, binary)
  extra_outputs: any extra outputs that our target should depend on
  part_of_all: flag indicating this target is part of 'all'
  """
  self.WriteLn('### Rules for final target.')

  if extra_outputs:
    self.WriteDependencyOnExtraOutputs(self.output_binary, extra_outputs)
    self.WriteMakeRule(extra_outputs, deps,
                       comment=('Preserve order dependency of '
                                'special output on deps.'),
                       order_only = True)

  # Per-config link flags.  For mac, also collect target postbuild commands
  # keyed by config name; they are emitted as make variables further below.
  target_postbuilds = {}
  if self.type != 'none':
    for configname in sorted(configs.keys()):
      config = configs[configname]
      if self.flavor == 'mac':
        ldflags = self.xcode_settings.GetLdflags(configname,
            generator_default_variables['PRODUCT_DIR'],
            lambda p: Sourceify(self.Absolutify(p)))

        # TARGET_POSTBUILDS_$(BUILDTYPE) is added to postbuilds later on.
        gyp_to_build = InvertRelativePath(self.path)
        target_postbuild = self.xcode_settings.GetTargetPostbuilds(
            configname,
            QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
                                                      self.output))),
            QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
                                                      self.output_binary))))
        if target_postbuild:
          target_postbuilds[configname] = target_postbuild
      else:
        ldflags = config.get('ldflags', [])
        # Compute an rpath for this output if needed.
        if any(dep.endswith('.so') for dep in deps):
          # We want to get the literal string "$ORIGIN" into the link command,
          # so we need lots of escaping.
          ldflags.append(r'-Wl,-rpath=\$$ORIGIN/lib.%s/' % self.toolset)
          ldflags.append(r'-Wl,-rpath-link=\$(builddir)/lib.%s/' %
                         self.toolset)
      self.WriteList(ldflags, 'LDFLAGS_%s' % configname)
      if self.flavor == 'mac':
        self.WriteList(self.xcode_settings.GetLibtoolflags(configname),
                       'LIBTOOLFLAGS_%s' % configname)
    libraries = spec.get('libraries')
    if libraries:
      # Remove duplicate entries
      libraries = gyp.common.uniquer(libraries)
      if self.flavor == 'mac':
        libraries = self.xcode_settings.AdjustLibraries(libraries)
    self.WriteList(libraries, 'LIBS')
    # Target-local variables: the link command reads these per output.
    self.WriteLn('%s: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' %
        QuoteSpaces(self.output_binary))
    self.WriteLn('%s: LIBS := $(LIBS)' % QuoteSpaces(self.output_binary))
    if self.flavor == 'mac':
      self.WriteLn('%s: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))' %
          QuoteSpaces(self.output_binary))

  # Postbuild actions. Like actions, but implicitly depend on the target's
  # output.
  postbuilds = []
  if self.flavor == 'mac':
    if target_postbuilds:
      postbuilds.append('$(TARGET_POSTBUILDS_$(BUILDTYPE))')
    postbuilds.extend(
        gyp.xcode_emulation.GetSpecPostbuildCommands(spec))

  if postbuilds:
    # Envvars may be referenced by TARGET_POSTBUILDS_$(BUILDTYPE),
    # so we must output its definition first, since we declare variables
    # using ":=".
    self.WriteSortedXcodeEnv(self.output, self.GetSortedXcodePostbuildEnv())

    for configname in target_postbuilds:
      self.WriteLn('%s: TARGET_POSTBUILDS_%s := %s' %
          (QuoteSpaces(self.output),
           configname,
           gyp.common.EncodePOSIXShellList(target_postbuilds[configname])))

    # Postbuilds expect to be run in the gyp file's directory, so insert an
    # implicit postbuild to cd to there.
    postbuilds.insert(0, gyp.common.EncodePOSIXShellList(['cd', self.path]))
    for i in xrange(len(postbuilds)):
      if not postbuilds[i].startswith('$'):
        postbuilds[i] = EscapeShellArgument(postbuilds[i])
    self.WriteLn('%s: builddir := $(abs_builddir)' % QuoteSpaces(self.output))
    self.WriteLn('%s: POSTBUILDS := %s' % (
        QuoteSpaces(self.output), ' '.join(postbuilds)))

  # A bundle directory depends on its dependencies such as bundle resources
  # and bundle binary. When all dependencies have been built, the bundle
  # needs to be packaged.
  if self.is_mac_bundle:
    # If the framework doesn't contain a binary, then nothing depends
    # on the actions -- make the framework depend on them directly too.
    self.WriteDependencyOnExtraOutputs(self.output, extra_outputs)

    # Bundle dependencies. Note that the code below adds actions to this
    # target, so if you move these two lines, move the lines below as well.
    self.WriteList(map(QuoteSpaces, bundle_deps), 'BUNDLE_DEPS')
    self.WriteLn('%s: $(BUNDLE_DEPS)' % QuoteSpaces(self.output))

    # After the framework is built, package it. Needs to happen before
    # postbuilds, since postbuilds depend on this.
    if self.type in ('shared_library', 'loadable_module'):
      self.WriteLn('\t@$(call do_cmd,mac_package_framework,,,%s)' %
          self.xcode_settings.GetFrameworkVersion())

    # Bundle postbuilds can depend on the whole bundle, so run them after
    # the bundle is packaged, not already after the bundle binary is done.
    if postbuilds:
      self.WriteLn('\t@$(call do_postbuilds)')
      postbuilds = []  # Don't write postbuilds for target's output.

    # Needed by test/mac/gyptest-rebuild.py.
    self.WriteLn('\t@true # No-op, used by tests')

    # Since this target depends on binary and resources which are in
    # nested subfolders, the framework directory will be older than
    # its dependencies usually. To prevent this rule from executing
    # on every build (expensive, especially with postbuilds), expliclity
    # update the time on the framework directory.
    self.WriteLn('\t@touch -c %s' % QuoteSpaces(self.output))

  if postbuilds:
    assert not self.is_mac_bundle, ('Postbuilds for bundles should be done '
        'on the bundle, not the binary (target \'%s\')' % self.target)
    assert 'product_dir' not in spec, ('Postbuilds do not work with '
        'custom product_dir')

  # Emit the actual build rule for this target type.
  if self.type == 'executable':
    self.WriteLn('%s: LD_INPUTS := %s' % (
        QuoteSpaces(self.output_binary),
        ' '.join(map(QuoteSpaces, link_deps))))
    if self.toolset == 'host' and self.flavor == 'android':
      self.WriteDoCmd([self.output_binary], link_deps, 'link_host',
                      part_of_all, postbuilds=postbuilds)
    else:
      self.WriteDoCmd([self.output_binary], link_deps, 'link', part_of_all,
                      postbuilds=postbuilds)
  elif self.type == 'static_library':
    for link_dep in link_deps:
      assert ' ' not in link_dep, (
          "Spaces in alink input filenames not supported (%s)" % link_dep)
    self.WriteDoCmd([self.output_binary], link_deps, 'alink', part_of_all,
                    postbuilds=postbuilds)
  elif self.type == 'shared_library':
    self.WriteLn('%s: LD_INPUTS := %s' % (
        QuoteSpaces(self.output_binary),
        ' '.join(map(QuoteSpaces, link_deps))))
    self.WriteDoCmd([self.output_binary], link_deps, 'solink', part_of_all,
                    postbuilds=postbuilds)
  elif self.type == 'loadable_module':
    for link_dep in link_deps:
      assert ' ' not in link_dep, (
          "Spaces in module input filenames not supported (%s)" % link_dep)
    if self.toolset == 'host' and self.flavor == 'android':
      self.WriteDoCmd([self.output_binary], link_deps, 'solink_module_host',
                      part_of_all, postbuilds=postbuilds)
    else:
      self.WriteDoCmd(
          [self.output_binary], link_deps, 'solink_module', part_of_all,
          postbuilds=postbuilds)
  elif self.type == 'none':
    # Write a stamp line.
    self.WriteDoCmd([self.output_binary], deps, 'touch', part_of_all,
                    postbuilds=postbuilds)
  else:
    # NOTE(review): 'target' is undefined in this scope -- likely meant
    # self.target; this line would raise NameError if ever reached.
    print "WARNING: no output for", self.type, target

  # Add an alias for each target (if there are any outputs).
  # Installable target aliases are created below.
  if ((self.output and self.output != self.target) and
      (self.type not in self._INSTALLABLE_TARGETS)):
    self.WriteMakeRule([self.target], [self.output],
                       comment='Add target alias', phony = True)
    if part_of_all:
      self.WriteMakeRule(['all'], [self.target],
                         comment = 'Add target alias to "all" target.',
                         phony = True)

  # Add special-case rules for our installable targets.
  # 1) They need to install to the build dir or "product" dir.
  # 2) They get shortcuts for building (e.g. "make chrome").
  # 3) They are part of "make all".
  if self.type in self._INSTALLABLE_TARGETS:
    if self.type == 'shared_library':
      file_desc = 'shared library'
    else:
      file_desc = 'executable'
    install_path = self._InstallableTargetInstallPath()
    installable_deps = [self.output]
    if (self.flavor == 'mac' and not 'product_dir' in spec and
        self.toolset == 'target'):
      # On mac, products are created in install_path immediately.
      assert install_path == self.output, '%s != %s' % (
          install_path, self.output)

    # Point the target alias to the final binary output.
    self.WriteMakeRule([self.target], [install_path],
                       comment='Add target alias', phony = True)
    if install_path != self.output:
      assert not self.is_mac_bundle  # See comment a few lines above.
      self.WriteDoCmd([install_path], [self.output], 'copy',
                      comment = 'Copy this to the %s output path.' %
                      file_desc, part_of_all=part_of_all)
      installable_deps.append(install_path)
    if self.output != self.alias and self.alias != self.target:
      self.WriteMakeRule([self.alias], installable_deps,
                         comment = 'Short alias for building this %s.' %
                         file_desc, phony = True)
    if part_of_all:
      self.WriteMakeRule(['all'], [install_path],
                         comment = 'Add %s to "all" target.' % file_desc,
                         phony = True)
def WriteList(self, value_list, variable=None, prefix='',
              quoter=QuoteIfNecessary):
  """Write a variable definition that is a list of values.

  E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
       foo = blaha blahb
  but in a pretty-printed style.
  """
  formatted = ''
  if value_list:
    # One value per continuation line, tab-indented, for readability.
    quoted_values = [quoter(prefix + value) for value in value_list]
    formatted = ' \\\n\t' + ' \\\n\t'.join(quoted_values)
  self.fp.write('%s :=%s\n\n' % (variable, formatted))
def WriteDoCmd(self, outputs, inputs, command, part_of_all, comment=None,
               postbuilds=False):
  """Write a Makefile rule that uses do_cmd.

  This makes the outputs dependent on the command line that was run,
  as well as support the V= make command line flag.

  command: name of the do_cmd helper to invoke (e.g. 'copy', 'link').
  postbuilds: if truthy, pass ',,1' so do_cmd runs $POSTBUILDS afterwards.
  """
  suffix = ''
  if postbuilds:
    # do_cmd's argument slots are comma-separated; a comma in the command
    # name would shift them.
    assert ',' not in command
    suffix = ',,1'  # Tell do_cmd to honor $POSTBUILDS
  self.WriteMakeRule(outputs, inputs,
                     actions = ['$(call do_cmd,%s%s)' % (command, suffix)],
                     comment = comment,
                     force = True)
  # Add our outputs to the list of targets we read depfiles from.
  # all_deps is only used for deps file reading, and for deps files we replace
  # spaces with ? because escaping doesn't work with make's $(sort) and
  # other functions.
  outputs = [QuoteSpaces(o, SPACE_REPLACEMENT) for o in outputs]
  self.WriteLn('all_deps += %s' % ' '.join(outputs))
def WriteMakeRule(self, outputs, inputs, actions=None, comment=None,
                  order_only=False, force=False, phony=False):
  """Write a Makefile rule, with some extra tricks.

  outputs: a list of outputs for the rule (note: this is not directly
           supported by make; see comments below)
  inputs: a list of inputs for the rule
  actions: a list of shell commands to run for the rule
  comment: a comment to put in the Makefile above the rule (also useful
           for making this Python script's code self-documenting)
  order_only: if true, makes the dependency order-only
  force: if true, include FORCE_DO_CMD as an order-only dep
  phony: if true, the rule does not actually generate the named output, the
         output is just a name to run the rule
  """
  outputs = map(QuoteSpaces, outputs)
  inputs = map(QuoteSpaces, inputs)

  if comment:
    self.WriteLn('# ' + comment)
  if phony:
    self.WriteLn('.PHONY: ' + ' '.join(outputs))
  # TODO(evanm): just make order_only a list of deps instead of these hacks.
  if order_only:
    order_insert = '| '
    pick_output = ' '.join(outputs)
  else:
    order_insert = ''
    pick_output = outputs[0]
  if force:
    force_append = ' FORCE_DO_CMD'
  else:
    force_append = ''
  if actions:
    # do_cmd reads $(TOOLSET); pin it target-locally so it survives
    # toplevel-variable expansion.
    self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0])
  self.WriteLn('%s: %s%s%s' % (pick_output, order_insert, ' '.join(inputs),
                               force_append))
  if actions:
    for action in actions:
      self.WriteLn('\t%s' % action)
  if not order_only and len(outputs) > 1:
    # If we have more than one output, a rule like
    #   foo bar: baz
    # that for *each* output we must run the action, potentially
    # in parallel.  That is not what we're trying to write -- what
    # we want is that we run the action once and it generates all
    # the files.
    # http://www.gnu.org/software/hello/manual/automake/Multiple-Outputs.html
    # discusses this problem and has this solution:
    # 1) Write the naive rule that would produce parallel runs of
    # the action.
    # 2) Make the outputs seralized on each other, so we won't start
    # a parallel run until the first run finishes, at which point
    # we'll have generated all the outputs and we're done.
    self.WriteLn('%s: %s' % (' '.join(outputs[1:]), outputs[0]))
    # Add a dummy command to the "extra outputs" rule, otherwise make seems to
    # think these outputs haven't (couldn't have?) changed, and thus doesn't
    # flag them as changed (i.e. include in '$?') when evaluating dependent
    # rules, which in turn causes do_cmd() to skip running dependent commands.
    self.WriteLn('%s: ;' % (' '.join(outputs[1:])))
  self.WriteLn()
def WriteAndroidNdkModuleRule(self, module_name, all_sources, link_deps):
  """Write a set of LOCAL_XXX definitions for Android NDK.

  These variable definitions will be used by Android NDK but do nothing for
  non-Android applications.

  Arguments:
    module_name: Android NDK module name, which must be unique among all
      module names.
    all_sources: A list of source files (will be filtered by Compilable).
    link_deps: A list of link dependencies, which must be sorted in
      the order from dependencies to dependents.
  """
  # Only linkable target types map to NDK build rules.
  if self.type not in ('executable', 'shared_library', 'static_library'):
    return

  self.WriteLn('# Variable definitions for Android applications')
  self.WriteLn('include $(CLEAR_VARS)')
  self.WriteLn('LOCAL_MODULE := ' + module_name)
  self.WriteLn('LOCAL_CFLAGS := $(CFLAGS_$(BUILDTYPE)) '
               '$(DEFS_$(BUILDTYPE)) '
               # LOCAL_CFLAGS is applied to both of C and C++.  There is
               # no way to specify $(CFLAGS_C_$(BUILDTYPE)) only for C
               # sources.
               '$(CFLAGS_C_$(BUILDTYPE)) '
               # $(INCS_$(BUILDTYPE)) includes the prefix '-I' while
               # LOCAL_C_INCLUDES does not expect it.  So put it in
               # LOCAL_CFLAGS.
               '$(INCS_$(BUILDTYPE))')
  # LOCAL_CXXFLAGS is obsolete and LOCAL_CPPFLAGS is preferred.
  self.WriteLn('LOCAL_CPPFLAGS := $(CFLAGS_CC_$(BUILDTYPE))')
  self.WriteLn('LOCAL_C_INCLUDES :=')
  self.WriteLn('LOCAL_LDLIBS := $(LDFLAGS_$(BUILDTYPE)) $(LIBS)')

  # Detect the C++ extension.  The most common extension among the sources
  # wins; ties keep '.cpp'.
  cpp_ext = {'.cc': 0, '.cpp': 0, '.cxx': 0}
  default_cpp_ext = '.cpp'
  for filename in all_sources:
    ext = os.path.splitext(filename)[1]
    if ext in cpp_ext:
      cpp_ext[ext] += 1
      if cpp_ext[ext] > cpp_ext[default_cpp_ext]:
        default_cpp_ext = ext
  self.WriteLn('LOCAL_CPP_EXTENSION := ' + default_cpp_ext)

  self.WriteList(map(self.Absolutify, filter(Compilable, all_sources)),
                 'LOCAL_SRC_FILES')

  # Filter out those which do not match prefix and suffix and produce
  # the resulting list without prefix and suffix.
  def DepsToModules(deps, prefix, suffix):
    modules = []
    for filepath in deps:
      filename = os.path.basename(filepath)
      if filename.startswith(prefix) and filename.endswith(suffix):
        modules.append(filename[len(prefix):-len(suffix)])
    return modules

  # Retrieve the default value of 'SHARED_LIB_SUFFIX'
  params = {'flavor': 'linux'}
  default_variables = {}
  CalculateVariables(default_variables, params)

  self.WriteList(
      DepsToModules(link_deps,
                    generator_default_variables['SHARED_LIB_PREFIX'],
                    default_variables['SHARED_LIB_SUFFIX']),
      'LOCAL_SHARED_LIBRARIES')
  self.WriteList(
      DepsToModules(link_deps,
                    generator_default_variables['STATIC_LIB_PREFIX'],
                    generator_default_variables['STATIC_LIB_SUFFIX']),
      'LOCAL_STATIC_LIBRARIES')

  if self.type == 'executable':
    self.WriteLn('include $(BUILD_EXECUTABLE)')
  elif self.type == 'shared_library':
    self.WriteLn('include $(BUILD_SHARED_LIBRARY)')
  elif self.type == 'static_library':
    self.WriteLn('include $(BUILD_STATIC_LIBRARY)')
  self.WriteLn()
def WriteLn(self, text=''):
  """Append one line of text, terminated by a newline, to the makefile."""
  line = text + '\n'
  self.fp.write(line)
def GetSortedXcodeEnv(self, additional_settings=None):
  """Return the Xcode build environment for this target as a sorted list of
  (key, value) pairs, via gyp.xcode_emulation.

  additional_settings: optional extra settings merged into the env.
  """
  return gyp.xcode_emulation.GetSortedXcodeEnv(
      self.xcode_settings, "$(abs_builddir)",
      os.path.join("$(abs_srcdir)", self.path), "$(BUILDTYPE)",
      additional_settings)
def GetSortedXcodePostbuildEnv(self):
  """Return the Xcode env for postbuilds, including CHROMIUM_STRIP_SAVE_FILE.
  """
  # CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
  # TODO(thakis): It would be nice to have some general mechanism instead.
  strip_save_file = self.xcode_settings.GetPerTargetSetting(
      'CHROMIUM_STRIP_SAVE_FILE', '')
  # Even if strip_save_file is empty, explicitly write it. Else a postbuild
  # might pick up an export from an earlier target.
  return self.GetSortedXcodeEnv(
      additional_settings={'CHROMIUM_STRIP_SAVE_FILE': strip_save_file})
def WriteSortedXcodeEnv(self, target, env):
  """Write one target-local `export KEY := value` line per env entry.

  target: makefile target the exports are scoped to.
  env: sorted list of (key, value) pairs.
  """
  for key, value in env:
    # For
    #   foo := a\ b
    # the escaped space does the right thing. For
    #   export foo := a\ b
    # it does not -- the backslash is written to the env as literal character.
    # So don't escape spaces in the value.
    line = '%s: export %s := %s' % (QuoteSpaces(target), key, value)
    self.WriteLn(line)
def Objectify(self, path):
  """Convert a path to its output directory form."""
  # Re-root any '$(obj)/' reference into this toolset's object directory.
  if '$(' in path:
    path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/' % self.toolset)
  # Anything not already under the object tree gets prefixed into it.
  if '$(obj)' not in path:
    path = '$(obj).%s/$(TARGET)/%s' % (self.toolset, path)
  return path
def Pchify(self, path, lang):
  """Convert a prefix header path to its output directory form."""
  path = self.Absolutify(path)
  if '$(' in path:
    # Re-root '$(obj)/' references into the per-language pch directory.
    return path.replace('$(obj)/', '$(obj).%s/$(TARGET)/pch-%s' %
                        (self.toolset, lang))
  return '$(obj).%s/$(TARGET)/pch-%s/%s' % (self.toolset, lang, path)
def Absolutify(self, path):
  """Convert a subdirectory-relative path into a base-relative path.

  Paths containing make variables ('$(') are not joined or normalized;
  they only get trailing slashes stripped.
  """
  has_variables = '$(' in path
  if not has_variables:
    return os.path.normpath(os.path.join(self.path, path))
  # Don't call normpath in this case, as it might collapse the
  # path too aggressively if it features '..'. However it's still
  # important to strip trailing slashes.
  return path.rstrip('/')
def ExpandInputRoot(self, template, expansion, dirname):
  """Substitute %(INPUT_ROOT)s / %(INPUT_DIRNAME)s placeholders in template.

  Templates without either placeholder are returned unchanged.
  """
  placeholders = ('%(INPUT_ROOT)s', '%(INPUT_DIRNAME)s')
  if not any(marker in template for marker in placeholders):
    return template
  return template % {
      'INPUT_ROOT': expansion,
      'INPUT_DIRNAME': dirname,
  }
def _InstallableTargetInstallPath(self):
"""Returns the location of the final output for an installable target."""
# Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
# rely on this. Emulate this behavior for mac.
if (self.type == 'shared_library' and
(self.flavor != 'mac' or self.toolset != 'target')):
# Install all shared libs into a common directory (per toolset) for
# convenient access with LD_LIBRARY_PATH.
return '$(builddir)/lib.%s/%s' % (self.toolset, self.alias)
return '$(builddir)/' + self.alias
def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
                              build_files):
  """Write the target to regenerate the Makefile.

  params: generator params dict ('options', 'build_files_arg', 'gyp_binary').
  root_makefile: open file object for the top-level Makefile.
  makefile_name: name of the Makefile target to regenerate.
  build_files: gyp files the Makefile depends on.
  """
  options = params['options']
  build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir)
                      for filename in params['build_files_arg']]
  gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
                                            options.toplevel_dir)
  # Make sure a relative gyp binary is invoked as './gyp', not found via PATH.
  if not gyp_binary.startswith(os.sep):
    gyp_binary = os.path.join('.', gyp_binary)
  root_makefile.write(
      "quiet_cmd_regen_makefile = ACTION Regenerating $@\n"
      "cmd_regen_makefile = %(cmd)s\n"
      "%(makefile_name)s: %(deps)s\n"
      "\t$(call do_cmd,regen_makefile)\n\n" % {
          'makefile_name': makefile_name,
          'deps': ' '.join(map(Sourceify, build_files)),
          'cmd': gyp.common.EncodePOSIXShellList(
                     [gyp_binary, '-fmake'] +
                     gyp.RegenerateFlags(options) +
                     build_files_args)})
def RunSystemTests(flavor):
  """Run tests against the system to compute default settings for commands.

  Returns:
    dictionary of settings matching the block of command-lines used in
    SHARED_HEADER.  E.g. the dictionary will contain a ARFLAGS.target
    key for the default ARFLAGS for the target ar command.
  """
  # Compute flags used for building static archives.
  # N.B.: this fallback logic should match the logic in SHARED_HEADER.
  # See comment there for more details.
  ar_target = GetEnvironFallback(('AR_target', 'AR'), 'ar')
  cc_target = GetEnvironFallback(('CC_target', 'CC'), 'cc')
  arflags_target = 'crs'
  # ar -T enables thin archives on Linux. OS X's ar supports a -T flag, but it
  # does something useless (it limits filenames in the archive to 15 chars).
  if flavor != 'mac' and gyp.system_test.TestArSupportsT(ar_command=ar_target,
                                                         cc_command=cc_target):
    arflags_target = 'crsT'

  ar_host = os.environ.get('AR_host', 'ar')
  cc_host = os.environ.get('CC_host', 'gcc')
  arflags_host = 'crs'
  # It feels redundant to compute this again given that most builds aren't
  # cross-compiles, but due to quirks of history CC_host defaults to 'gcc'
  # while CC_target defaults to 'cc', so the commands really are different
  # even though they're nearly guaranteed to run the same code underneath.
  if flavor != 'mac' and gyp.system_test.TestArSupportsT(ar_command=ar_host,
                                                         cc_command=cc_host):
    arflags_host = 'crsT'

  return { 'ARFLAGS.target': arflags_target,
           'ARFLAGS.host': arflags_host }
def GenerateOutput(target_list, target_dicts, data, params):
  """Entry point: write the root Makefile, one .mk file per target, and one
  sub-Makefile per .gyp file with targets."""
  options = params['options']
  flavor = gyp.common.GetFlavor(params)
  generator_flags = params.get('generator_flags', {})
  builddir_name = generator_flags.get('output_dir', 'out')
  android_ndk_version = generator_flags.get('android_ndk_version', None)
  default_target = generator_flags.get('default_target', 'all')

  def CalculateMakefilePath(build_file, base_name):
    """Determine where to write a Makefile for a given gyp file."""
    # Paths in gyp files are relative to the .gyp file, but we want
    # paths relative to the source root for the master makefile. Grab
    # the path of the .gyp file as the base to relativize against.
    # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.depth)
    # We write the file in the base_path directory.
    output_file = os.path.join(options.depth, base_path, base_name)
    if options.generator_output:
      output_file = os.path.join(options.generator_output, output_file)
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.toplevel_dir)
    return base_path, output_file

  # TODO: search for the first non-'Default' target. This can go
  # away when we add verification that all targets have the
  # necessary configurations.
  default_configuration = None
  toolsets = set([target_dicts[target]['toolset'] for target in target_list])
  for target in target_list:
    spec = target_dicts[target]
    if spec['default_configuration'] != 'Default':
      default_configuration = spec['default_configuration']
      break
  if not default_configuration:
    default_configuration = 'Default'

  srcdir = '.'
  makefile_name = 'Makefile' + options.suffix
  makefile_path = os.path.join(options.toplevel_dir, makefile_name)
  if options.generator_output:
    # Redirect output into the generator dir; srcdir becomes the path back
    # to the sources from there.
    global srcdir_prefix
    makefile_path = os.path.join(options.generator_output, makefile_path)
    srcdir = gyp.common.RelativePath(srcdir, options.generator_output)
    srcdir_prefix = '$(srcdir)/'

  # Template parameters for SHARED_HEADER, specialized per flavor below.
  flock_command= 'flock'
  header_params = {
      'default_target': default_target,
      'builddir': builddir_name,
      'default_configuration': default_configuration,
      'flock': flock_command,
      'flock_index': 1,
      'link_commands': LINK_COMMANDS_LINUX,
      'extra_commands': '',
      'srcdir': srcdir,
    }
  if flavor == 'mac':
    flock_command = './gyp-mac-tool flock'
    header_params.update({
        'flock': flock_command,
        'flock_index': 2,
        'link_commands': LINK_COMMANDS_MAC,
        'extra_commands': SHARED_HEADER_MAC_COMMANDS,
    })
  elif flavor == 'android':
    header_params.update({
        'link_commands': LINK_COMMANDS_ANDROID,
    })
  elif flavor == 'solaris':
    header_params.update({
        'flock': './gyp-sun-tool flock',
        'flock_index': 2,
        'extra_commands': SHARED_HEADER_SUN_COMMANDS,
    })
  elif flavor == 'freebsd':
    header_params.update({
        'flock': 'lockf',
    })
  header_params.update(RunSystemTests(flavor))
  header_params.update({
      'CC.target': GetEnvironFallback(('CC_target', 'CC'), '$(CC)'),
      'AR.target': GetEnvironFallback(('AR_target', 'AR'), '$(AR)'),
      'CXX.target': GetEnvironFallback(('CXX_target', 'CXX'), '$(CXX)'),
      'LINK.target': GetEnvironFallback(('LD_target', 'LD'), '$(LINK)'),
      'CC.host': GetEnvironFallback(('CC_host',), 'gcc'),
      'AR.host': GetEnvironFallback(('AR_host',), 'ar'),
      'CXX.host': GetEnvironFallback(('CXX_host',), 'g++'),
      'LINK.host': GetEnvironFallback(('LD_host',), 'g++'),
  })

  # make_global_settings must be identical across all build files; the first
  # target's build file is used as the reference (asserted per-target below).
  build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
  make_global_settings_array = data[build_file].get('make_global_settings', [])
  make_global_settings = ''
  for key, value in make_global_settings_array:
    if value[0] != '$':
      value = '$(abspath %s)' % value
    if key == 'LINK':
      # Serialize links through a lock file so parallel links don't thrash.
      make_global_settings += ('%s ?= %s $(builddir)/linker.lock %s\n' %
                               (key, flock_command, value))
    elif key in ('CC', 'CC.host', 'CXX', 'CXX.host'):
      make_global_settings += (
          'ifneq (,$(filter $(origin %s), undefined default))\n' % key)
      # Let gyp-time envvars win over global settings.
      if key in os.environ:
        value = os.environ[key]
      make_global_settings += ' %s = %s\n' % (key, value)
      make_global_settings += 'endif\n'
    else:
      make_global_settings += '%s ?= %s\n' % (key, value)
  header_params['make_global_settings'] = make_global_settings

  ensure_directory_exists(makefile_path)
  root_makefile = open(makefile_path, 'w')
  root_makefile.write(SHARED_HEADER % header_params)
  # Currently any versions have the same effect, but in future the behavior
  # could be different.
  if android_ndk_version:
    root_makefile.write(
        '# Define LOCAL_PATH for build of Android applications.\n'
        'LOCAL_PATH := $(call my-dir)\n'
        '\n')
  for toolset in toolsets:
    root_makefile.write('TOOLSET := %s\n' % toolset)
  WriteRootHeaderSuffixRules(root_makefile)

  # Put build-time support tools next to the root Makefile.
  dest_path = os.path.dirname(makefile_path)
  gyp.common.CopyTool(flavor, dest_path)

  # Find the list of targets that derive from the gyp file(s) being built.
  needed_targets = set()
  for build_file in params['build_files']:
    for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
      needed_targets.add(target)

  build_files = set()
  include_list = set()
  for qualified_target in target_list:
    build_file, target, toolset = gyp.common.ParseQualifiedTarget(
        qualified_target)

    this_make_global_settings = data[build_file].get('make_global_settings', [])
    assert make_global_settings_array == this_make_global_settings, (
        "make_global_settings needs to be the same for all targets.")

    build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
    included_files = data[build_file]['included_files']
    for included_file in included_files:
      # The included_files entries are relative to the dir of the build file
      # that included them, so we have to undo that and then make them relative
      # to the root dir.
      relative_include_file = gyp.common.RelativePath(
          gyp.common.UnrelativePath(included_file, build_file),
          options.toplevel_dir)
      abs_include_file = os.path.abspath(relative_include_file)
      # If the include file is from the ~/.gyp dir, we should use absolute path
      # so that relocating the src dir doesn't break the path.
      if (params['home_dot_gyp'] and
          abs_include_file.startswith(params['home_dot_gyp'])):
        build_files.add(abs_include_file)
      else:
        build_files.add(relative_include_file)

    base_path, output_file = CalculateMakefilePath(build_file,
        target + '.' + toolset + options.suffix + '.mk')

    spec = target_dicts[qualified_target]
    configs = spec['configurations']

    if flavor == 'mac':
      gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)

    writer = MakefileWriter(generator_flags, flavor)
    writer.Write(qualified_target, base_path, output_file, spec, configs,
                 part_of_all=qualified_target in needed_targets)

    # Our root_makefile lives at the source root. Compute the relative path
    # from there to the output_file for including.
    mkfile_rel_path = gyp.common.RelativePath(output_file,
                                              os.path.dirname(makefile_path))
    include_list.add(mkfile_rel_path)

  # Write out per-gyp (sub-project) Makefiles.
  # NOTE(review): 'writer' below is whatever instance the last loop iteration
  # left behind — assumes target_list is non-empty (target_list[0] was already
  # dereferenced above, so an empty list would have raised earlier).
  depth_rel_path = gyp.common.RelativePath(options.depth, os.getcwd())
  for build_file in build_files:
    # The paths in build_files were relativized above, so undo that before
    # testing against the non-relativized items in target_list and before
    # calculating the Makefile path.
    build_file = os.path.join(depth_rel_path, build_file)
    gyp_targets = [target_dicts[target]['target_name'] for target in target_list
                   if target.startswith(build_file) and
                   target in needed_targets]
    # Only generate Makefiles for gyp files with targets.
    if not gyp_targets:
      continue
    base_path, output_file = CalculateMakefilePath(build_file,
        os.path.splitext(os.path.basename(build_file))[0] + '.Makefile')
    makefile_rel_path = gyp.common.RelativePath(os.path.dirname(makefile_path),
                                                os.path.dirname(output_file))
    writer.WriteSubMake(output_file, makefile_rel_path, gyp_targets,
                        builddir_name)

  # Write out the sorted list of includes.
  root_makefile.write('\n')
  for include_file in sorted(include_list):
    # We wrap each .mk include in an if statement so users can tell make to
    # not load a file by setting NO_LOAD. The below make code says, only
    # load the .mk file if the .mk filename doesn't start with a token in
    # NO_LOAD.
    root_makefile.write(
        "ifeq ($(strip $(foreach prefix,$(NO_LOAD),\\\n"
        " $(findstring $(join ^,$(prefix)),\\\n"
        " $(join ^," + include_file + ")))),)\n")
    root_makefile.write(" include " + include_file + "\n")
    root_makefile.write("endif\n")
  root_makefile.write('\n')

  if (not generator_flags.get('standalone')
      and generator_flags.get('auto_regeneration', True)):
    WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)

  root_makefile.write(SHARED_FOOTER)

  root_makefile.close()
|
damoti/pyjx-gwt | refs/heads/master | gwt/pyjamas/gmaps/Marker.py | 7 | # Copyright (C) 2009 Daniel Carvalho <idnael@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __pyjamas__ import JS
from pyjamas.gmaps.Utils import dictToJs, createListenerMethods
def Marker(options):
    """Build a native google.maps.Marker from `options` and attach the
    pyjamas listener helper methods to it."""
    native_marker = JS("""new $wnd['google']['maps']['Marker'](@{{options}})""")
    createListenerMethods(native_marker)
    return native_marker
def MarkerOptions(**params):
    """Convert keyword arguments into a JS object usable as Marker options."""
    options = dictToJs(params)
    return options
def MarkerImage(url, size, origin, anchor):
    """Build a native google.maps.MarkerImage.

    Args:
        url: image URL for the marker icon.
        size: display size of the image.
        origin: position of the image within a sprite, if any.
        anchor: pixel anchoring the image to the marker position.

    Returns:
        The JS MarkerImage object with listener helper methods attached.
    """
    markerImage = JS("""
    new $wnd['google']['maps']['MarkerImage'](@{{url}}, @{{size}}, @{{origin}}, @{{anchor}})
    """)
    # Bug fix: this previously called createListenerMethods(marker), but no
    # name 'marker' exists in this scope (NameError at runtime). Attach the
    # listener helpers to the object we just created, mirroring Marker().
    createListenerMethods(markerImage)
    return markerImage
|
zulip/zulip | refs/heads/master | zerver/migrations/0268_add_userpresence_realm_timestamp_index.py | 5 | # Generated by Django 1.11.28 on 2020-02-08 20:34
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    # Follows 0267, which backfilled UserPresence.realm_id, so every row has
    # a realm value before the field is tightened and indexed here.

    dependencies = [
        ("zerver", "0267_backfill_userpresence_realm_id"),
    ]

    operations = [
        # Re-declare the realm foreign key (CASCADE: presence rows are
        # deleted together with their realm).
        migrations.AlterField(
            model_name="userpresence",
            name="realm",
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="zerver.Realm"),
        ),
        # Composite index supporting queries that filter by realm and
        # bound/order by timestamp.
        migrations.AlterIndexTogether(
            name="userpresence",
            index_together={("realm", "timestamp")},
        ),
    ]
|
adit-chandra/tensorflow | refs/heads/master | tensorflow/python/ops/ragged/ragged_constant_value_op_test.py | 9 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_factory_ops.constant_value."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedConstantValueOpTest(test_util.TensorFlowTestCase,
                                parameterized.TestCase):
  """Parameterized round-trip and error tests for
  ragged_factory_ops.constant_value."""

  @parameterized.parameters(
      #=========================================================================
      # 0-dimensional tensors.
      dict(pylist='x', expected_shape=()),

      #=========================================================================
      # 1-dimensional tensors.
      dict(pylist=[1, 2, 3], expected_shape=(3,)),

      #=========================================================================
      # 2-dimensional tensors.
      dict(pylist=[[1, 2, 3], [4], [5, 6]], expected_shape=(3, None)),
      dict(pylist=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], expected_shape=(3, None)),

      #=========================================================================
      # 3-dimensional tensors.
      dict(
          pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
          expected_shape=(3, None, None)),
      dict(
          pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
          ragged_rank=1,
          expected_shape=(3, None, 2)),
      dict(
          pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
          inner_shape=(2,),
          expected_shape=(3, None, 2)),
      dict(
          pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
          ragged_rank=1,
          inner_shape=(2,),
          expected_shape=(3, None, 2)),
      # 3-dimensional tensors with numpy arrays
      dict(
          pylist=[[[1, 2], np.array([3, np.array(4)])],
                  np.array([]), [[5, 6], [7, 8], [9, 0]]],
          expected_shape=(3, None, None)),
      dict(
          pylist=[[[1, 2], np.array([3, np.array(4)])],
                  np.array([]), [[5, 6], [7, 8], [9, 0]]],
          ragged_rank=1,
          expected_shape=(3, None, 2)),
      dict(
          pylist=[[[1, 2], np.array([3, np.array(4)])],
                  np.array([]), [[5, 6], [7, 8], [9, 0]]],
          inner_shape=(2,),
          expected_shape=(3, None, 2)),
      dict(
          pylist=[[[1, 2], np.array([3, np.array(4)])],
                  np.array([]), [[5, 6], [7, 8], [9, 0]]],
          ragged_rank=1,
          inner_shape=(2,),
          expected_shape=(3, None, 2)),

      #=========================================================================
      # 4-dimensional tensors.
      dict(
          pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
                  [[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
          expected_shape=(2, None, None, None)),
      dict(
          pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
                  [[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
          ragged_rank=1,
          expected_shape=(2, None, 2, 2)),
      dict(
          pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
                  [[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
          inner_shape=(2,),
          expected_shape=(2, None, None, 2)),
      dict(
          pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
                  [[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
          inner_shape=(2, 2),
          expected_shape=(2, None, 2, 2)),
      # 4-dimensional tensors with numpy arrays
      dict(
          pylist=np.array([[[np.array([1, 2]), [3, 4]], [[5, 6], [7, 8]]],
                           np.array([[[2, 4], [6, 8]], [[1, 5], [7, 9]]])]),
          expected_shape=(2, None, None, None)),

      #=========================================================================
      # Empty tensors (no scalar values) w/ default ragged_rank and inner_shape
      dict(pylist=[], expected_shape=(0,)),
      dict(pylist=[[], [], np.array([])], expected_shape=(3, None)),
      dict(
          pylist=[[[], []], [], [[], [[]]]],
          expected_shape=(3, None, None, None)),
      dict(
          pylist=np.array([np.array([[], []]),
                           np.array([]), [[], [[]]]]),
          expected_shape=(3, None, None, None)),

      #=========================================================================
      # Empty tensors (no scalar values) w/ explicit ragged_rank or inner_shape
      dict(pylist=[], ragged_rank=1, expected_shape=(0, None)),
      dict(pylist=[], ragged_rank=2, expected_shape=(0, None, None)),
      dict(pylist=[], inner_shape=(0, 100, 20), expected_shape=(0, 100, 20)),
      dict(
          pylist=[],
          ragged_rank=1,
          inner_shape=(100, 20),
          expected_shape=(0, None, 100, 20)),
      dict(
          pylist=[],
          ragged_rank=2,
          inner_shape=(100, 20),
          expected_shape=(0, None, None, 100, 20)),
      dict(pylist=[[], [], []], ragged_rank=2, expected_shape=(3, None, None)),
      dict(pylist=[], inner_shape=(0,), expected_shape=(0,)),
      dict(pylist=[[]], inner_shape=(1, 0), expected_shape=(1, 0)),
      dict(
          pylist=np.array([]),
          ragged_rank=1,
          inner_shape=(100, 20),
          expected_shape=(0, None, 100, 20)),

      #=========================================================================
      # default/inferred dtypes.
      #
      # Note: numpy has different default/inferred types than tensorflow.
      # Since we are using values, not tensors, we get the default numpy types
      # here.
      dict(pylist=[], expected_dtype=np.float64),
      dict(pylist=[[[], [[[]], []]]], expected_dtype=np.float64),
      dict(pylist=[[1, 2], [3], [4, 5, 6]], expected_dtype=np.int64),
      dict(pylist=[[1., 2.], [], [4., 5., 6.]], expected_dtype=np.float64),
      dict(pylist=[[1, 2], [3.], [4, 5, 6]], expected_dtype=np.float64),
      dict(pylist=[[b'a', b'b'], [b'c']], expected_dtype=np.dtype('S1')),
      dict(pylist=[[True]], expected_dtype=np.bool),
      dict(
          pylist=[np.array([1, 2]), np.array([3.]), [4, 5, 6]],
          expected_dtype=np.float64),

      #=========================================================================
      # explicit dtypes
      dict(pylist=[], dtype=np.float32),
      dict(pylist=[], dtype=np.dtype('S1')),
      dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=np.int64),
      dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=np.int32),
      dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=np.float32),
      dict(pylist=[[1., 2.], [3.], [4., 5., 6.]], dtype=np.float16),
      dict(pylist=[[1., 2.], [3.], [4., 5., 6.]], dtype=np.float32),
      dict(
          pylist=[[b'a', b'b'], [b'c'], [b'd', b'e', b'f']],
          dtype=np.dtype('S1')),
      dict(pylist=[], dtype=dtypes.float32, expected_dtype=np.float32),
      dict(pylist=[], dtype=dtypes.int32, expected_dtype=np.int32),
  )
  def testRaggedValues(self,
                       pylist,
                       dtype=None,
                       ragged_rank=None,
                       inner_shape=None,
                       expected_shape=None,
                       expected_dtype=None):
    """Tests that `ragged_value(pylist).to_list() == pylist`."""
    rt = ragged_factory_ops.constant_value(
        pylist, dtype=dtype, ragged_rank=ragged_rank, inner_shape=inner_shape)

    # Normalize the pylist, i.e., convert all np.arrays to list.
    # E.g., [np.array((1,2))] --> [[1,2]]
    pylist = _normalize_pylist(pylist)

    # If dtype was explicitly specified, check it.
    if expected_dtype is not None:
      self.assertEqual(rt.dtype, expected_dtype)
    elif dtype is not None:
      self.assertEqual(rt.dtype, dtype)

    # If ragged_rank was explicitly specified, check it.
    # (A non-ragged result -- a plain ndarray -- implies ragged_rank 0.)
    if ragged_rank is not None:
      if isinstance(rt, ragged_tensor_value.RaggedTensorValue):
        self.assertEqual(rt.ragged_rank, ragged_rank)
      else:
        self.assertEqual(0, ragged_rank)

    # If inner_shape was explicitly specified, check it.
    if inner_shape is not None:
      if isinstance(rt, ragged_tensor_value.RaggedTensorValue):
        self.assertEqual(rt.flat_values.shape[1:], inner_shape)
      else:
        self.assertEqual(rt.shape, inner_shape)

    if expected_shape is not None:
      self.assertEqual(tuple(rt.shape), expected_shape)

    # Round-trip check: a non-empty shape means a list-like result; an empty
    # shape means a scalar.
    if rt.shape:
      if isinstance(rt, ragged_tensor_value.RaggedTensorValue):
        self.assertEqual(rt.to_list(), pylist)
      else:
        self.assertEqual(rt.tolist(), pylist)
      if expected_shape is not None:
        self.assertEqual(rt.shape, expected_shape)
    else:
      self.assertEqual(rt, pylist)
      if expected_shape is not None:
        self.assertEqual((), expected_shape)

  @parameterized.parameters(
      dict(
          pylist=12,
          ragged_rank=1,
          exception=ValueError,
          message='Invalid pylist=12: incompatible with ragged_rank=1'),
      dict(
          pylist=np.array(12),
          ragged_rank=1,
          exception=ValueError,
          message='Invalid pylist=array\\(12\\): incompatible with '
          'ragged_rank=1'),
      dict(
          pylist=12,
          inner_shape=(1,),
          exception=ValueError,
          message='Invalid pylist=12: incompatible with '
          'dim\\(inner_shape\\)=1'),
      dict(
          pylist=[[[1], [2]]],
          ragged_rank=-1,
          exception=ValueError,
          message='Invalid ragged_rank=-1: must be nonnegative'),
      dict(
          pylist=[[1, [2]]],
          exception=ValueError,
          message='all scalar values must have the same nesting depth'),
      dict(
          pylist=[[[1]], [[[2]]]],
          exception=ValueError,
          message='all scalar values must have the same nesting depth'),
      dict(
          pylist=[[1], [[]]],
          exception=ValueError,
          message='Invalid pylist=.*: empty list nesting is greater '
          'than scalar value nesting'),
      dict(
          pylist=[1, 2, 3],
          ragged_rank=1,
          exception=ValueError,
          message='pylist has scalar values depth 1, but ragged_rank=1 '
          'requires scalar value depth greater than 1'),
      dict(
          pylist=[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
          ragged_rank=2,
          exception=ValueError,
          message='pylist has scalar values depth 2, but ragged_rank=2 '
          'requires scalar value depth greater than 2'),
      dict(
          pylist=[1, 2, 3],
          inner_shape=(1, 1),
          exception=ValueError,
          message='cannot reshape array'),
      dict(
          pylist=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
          inner_shape=(2, 2),
          ragged_rank=1,
          exception=ValueError,
          message='Invalid pylist=.*: incompatible with ragged_rank=1 and '
          'dim\\(inner_shape\\)=2'),
      dict(
          pylist=[[[1, 2], [3, 4]], [[5, 6], [7, 8, 9]]],
          ragged_rank=1,
          exception=ValueError,
          message='inner values have inconsistent shape'),
      dict(
          pylist=[[[], [[]]]],
          ragged_rank=1,
          exception=ValueError,
          message='inner values have inconsistent shape'),
  )
  def testRaggedValuesError(self,
                            pylist,
                            dtype=None,
                            ragged_rank=None,
                            inner_shape=None,
                            exception=None,
                            message=None):
    """Tests that `constant_value()` raises an expected exception."""
    self.assertRaisesRegexp(
        exception,
        message,
        ragged_factory_ops.constant_value,
        pylist,
        dtype=dtype,
        ragged_rank=ragged_rank,
        inner_shape=inner_shape)
def _normalize_pylist(item):
"""Convert all (possibly nested) np.arrays contained in item to list."""
# convert np.arrays in current level to list
if np.ndim(item) == 0:
return item
level = (x.tolist() if isinstance(x, np.ndarray) else x for x in item)
return [_normalize_pylist(el) if np.ndim(el) != 0 else el for el in level]
if __name__ == '__main__':
  # Run the test suite when the module is executed directly.
  googletest.main()
|
android-ia/platform_external_chromium_org | refs/heads/master | chrome/tools/webforms_extractor.py | 185 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Extracts registration forms from the corresponding HTML files.
Used for extracting forms within HTML files. This script is used in
conjunction with the webforms_aggregator.py script, which aggregates web pages
with fillable forms (i.e registration forms).
The purpose of this script is to extract out all non-form elements that may be
causing parsing errors and timeout issues when running browser_tests.
This script extracts all forms from a HTML file.
If there are multiple forms per downloaded site, multiple files are created
for each form.
Used as a standalone script but assumes that it is run from the directory in
which it is checked into.
Usage: forms_extractor.py [options]
Options:
-l LOG_LEVEL, --log_level=LOG_LEVEL,
LOG_LEVEL: debug, info, warning or error [default: error]
-j, --js extracts javascript elements from web form.
-h, --help show this help message and exit
"""
import glob
import logging
from optparse import OptionParser
import os
import re
import sys
class FormsExtractor(object):
  """Extracts HTML files, leaving only registration forms from the HTML file."""
  # Input files are the 'grabber-*.html' pages saved by webforms_aggregator;
  # outputs are written with the 'grabber-stripped-' prefix.
  _HTML_FILES_PATTERN = r'*.html'
  _HTML_FILE_PREFIX = r'grabber-'
  _FORM_FILE_PREFIX = r'grabber-stripped-'

  _REGISTRATION_PAGES_DIR = os.path.join(os.pardir, 'test', 'data', 'autofill',
                                         'heuristics', 'input')
  _EXTRACTED_FORMS_DIR = os.path.join(os.pardir, 'test', 'data', 'autofill',
                                      'heuristics', 'input')

  # Class-level logger plus a registry of attached handlers, so repeated
  # construction reuses (or removes) the same console handler instead of
  # stacking duplicates.
  logger = logging.getLogger(__name__)
  log_handlers = {'StreamHandler': None}

  # This pattern is used for retrieving the form location comment located at the
  # top of each downloaded HTML file indicating where the form originated from.
  _RE_FORM_LOCATION_PATTERN = re.compile(
      ur"""
      <!--Form\s{1}Location: # Starting of form location comment.
      .*? # Any characters (non-greedy).
      --> # Ending of the form comment.
      """, re.U | re.S | re.I | re.X)

  # This pattern is used for removing all script code.
  _RE_SCRIPT_PATTERN = re.compile(
      ur"""
      <script # A new opening '<script' tag.
      \b # The end of the word 'script'.
      .*? # Any characters (non-greedy).
      > # Ending of the (opening) tag: '>'.
      .*? # Any characters (non-greedy) between the tags.
      </script\s*> # The '</script>' closing tag.
      """, re.U | re.S | re.I | re.X)

  # This pattern is used for removing all href js code.
  _RE_HREF_JS_PATTERN = re.compile(
      ur"""
      \bhref # The word href and its beginning.
      \s*=\s* # The '=' with all whitespace before and after it.
      (?P<quote>[\'\"]) # A single or double quote which is captured.
      \s*javascript\s*: # The word 'javascript:' with any whitespace possible.
      .*? # Any characters (non-greedy) between the quotes.
      \1 # The previously captured single or double quote.
      """, re.U | re.S | re.I | re.X)

  # Plain (uncompiled) sub-expression matching a single on* event attribute;
  # it is spliced into _RE_TAG_WITH_EVENTS_PATTERN and _RE_EVENT_PATTERN below.
  _RE_EVENT_EXPR = (
      ur"""
      \b # The beginning of a new word.
      on\w+? # All words starting with 'on' (non-greedy)
      # example: |onmouseover|.
      \s*=\s* # The '=' with all whitespace before and after it.
      (?P<quote>[\'\"]) # A captured single or double quote.
      .*? # Any characters (non-greedy) between the quotes.
      \1 # The previously captured single or double quote.
      """)

  # This pattern is used for removing code with js events, such as |onload|.
  # By adding the leading |ur'<[^<>]*?'| and the trailing |'ur'[^<>]*?>'| the
  # pattern matches to strings such as '<tr class="nav"
  # onmouseover="mOvr1(this);" onmouseout="mOut1(this);">'
  _RE_TAG_WITH_EVENTS_PATTERN = re.compile(
      ur"""
      < # Matches character '<'.
      [^<>]*? # Matches any characters except '<' and '>' (non-greedy).""" +
      _RE_EVENT_EXPR +
      ur"""
      [^<>]*? # Matches any characters except '<' and '>' (non-greedy).
      > # Matches character '>'.
      """, re.U | re.S | re.I | re.X)

  # Adds whitespace chars at the end of the matched event. Also match trailing
  # whitespaces for JS events. Do not match leading whitespace.
  # For example: |< /form>| is invalid HTML and does not exist but |</form >| is
  # considered valid HTML.
  _RE_EVENT_PATTERN = re.compile(
      _RE_EVENT_EXPR + ur'\s*', re.U | re.S | re.I | re.X)

  # This pattern is used for finding form elements.
  _RE_FORM_PATTERN = re.compile(
      ur"""
      <form # A new opening '<form' tag.
      \b # The end of the word 'form'.
      .*? # Any characters (non-greedy).
      > # Ending of the (opening) tag: '>'.
      .*? # Any characters (non-greedy) between the tags.
      </form\s*> # The '</form>' closing tag.
      """, re.U | re.S | re.I | re.X)

  def __init__(self, input_dir=_REGISTRATION_PAGES_DIR,
               output_dir=_EXTRACTED_FORMS_DIR, logging_level=None):
    """Creates a FormsExtractor object.

    Args:
      input_dir: the directory of HTML files.
      output_dir: the directory where the registration form files will be
                  saved.
      logging_level: verbosity level, default is None.

    Raises:
      IOError exception if input directory doesn't exist.
    """
    if logging_level:
      # Attach a console handler once; reconfigure level on later calls.
      if not self.log_handlers['StreamHandler']:
        console = logging.StreamHandler()
        console.setLevel(logging.DEBUG)
        self.log_handlers['StreamHandler'] = console
        self.logger.addHandler(console)
      self.logger.setLevel(logging_level)
    else:
      # No level requested: silence the console handler if one was attached.
      if self.log_handlers['StreamHandler']:
        self.logger.removeHandler(self.log_handlers['StreamHandler'])
        self.log_handlers['StreamHandler'] = None

    self._input_dir = input_dir
    self._output_dir = output_dir
    if not os.path.isdir(self._input_dir):
      error_msg = 'Directory "%s" doesn\'t exist.' % self._input_dir
      self.logger.error('Error: %s', error_msg)
      raise IOError(error_msg)
    if not os.path.isdir(output_dir):
      os.makedirs(output_dir)
    self._form_location_comment = ''

  def _SubstituteAllEvents(self, matchobj):
    """Remove all js events that are present as attributes within a tag.

    Args:
      matchobj: A regexp |re.MatchObject| containing text that has at least one
                event. Example: |<tr class="nav" onmouseover="mOvr1(this);"
                onmouseout="mOut1(this);">|.

    Returns:
      The text containing the tag with all the attributes except for the tags
      with events. Example: |<tr class="nav">|.
    """
    tag_with_all_attrs = matchobj.group(0)
    return self._RE_EVENT_PATTERN.sub('', tag_with_all_attrs)

  def Extract(self, strip_js_only):
    """Extracts and saves the extracted registration forms.

    Iterates through all the HTML files.

    Args:
      strip_js_only: If True, only Javascript is stripped from the HTML content.
                     Otherwise, all non-form elements are stripped.
    """
    pathname_pattern = os.path.join(self._input_dir, self._HTML_FILES_PATTERN)
    html_files = [f for f in glob.glob(pathname_pattern) if os.path.isfile(f)]
    for filename in html_files:
      self.logger.info('Stripping file "%s" ...', filename)
      with open(filename, 'U') as f:
        # Strip <script> blocks, then javascript: hrefs, then on* event
        # attributes (innermost substitution runs first).
        html_content = self._RE_TAG_WITH_EVENTS_PATTERN.sub(
            self._SubstituteAllEvents,
            self._RE_HREF_JS_PATTERN.sub(
                '', self._RE_SCRIPT_PATTERN.sub('', f.read())))

        # Build the output name; the '%s' placeholder is later filled with
        # the form number (or '' when only stripping JS).
        form_filename = os.path.split(filename)[1] # Path dropped.
        form_filename = form_filename.replace(self._HTML_FILE_PREFIX, '', 1)
        (form_filename, extension) = os.path.splitext(form_filename)
        form_filename = (self._FORM_FILE_PREFIX + form_filename +
                         '%s' + extension)
        form_filename = os.path.join(self._output_dir, form_filename)

        if strip_js_only:
          form_filename = form_filename % ''
          try:
            with open(form_filename, 'w') as f:
              f.write(html_content)
          except IOError as e:
            self.logger.error('Error: %s', e)
            continue
        else: # Remove all non form elements.
          match = self._RE_FORM_LOCATION_PATTERN.search(html_content)
          if match:
            form_location_comment = match.group() + os.linesep
          else:
            form_location_comment = ''
          forms_iterator = self._RE_FORM_PATTERN.finditer(html_content)
          # One output file per <form> element, numbered from 1.
          for form_number, form_match in enumerate(forms_iterator, start=1):
            form_content = form_match.group()
            numbered_form_filename = form_filename % form_number
            try:
              with open(numbered_form_filename, 'w') as f:
                f.write(form_location_comment)
                f.write(form_content)
            except IOError as e:
              self.logger.error('Error: %s', e)
              continue
      self.logger.info('\tFile "%s" extracted SUCCESSFULLY!', filename)
def main():
parser = OptionParser()
parser.add_option(
'-l', '--log_level', metavar='LOG_LEVEL', default='error',
help='LOG_LEVEL: debug, info, warning or error [default: %default]')
parser.add_option(
'-j', '--js', dest='js', action='store_true', default=False,
help='Removes all javascript elements [default: %default]')
(options, args) = parser.parse_args()
options.log_level = options.log_level.upper()
if options.log_level not in ['DEBUG', 'INFO', 'WARNING', 'ERROR']:
print 'Wrong log_level argument.'
parser.print_help()
return 1
options.log_level = getattr(logging, options.log_level)
extractor = FormsExtractor(logging_level=options.log_level)
extractor.Extract(options.js)
return 0
if __name__ == '__main__':
  # Propagate main()'s return code as the process exit status.
  sys.exit(main())
|
HEG-Arc/paleo-2015-gestionair-remote | refs/heads/master | remote_control.py | 1 | import threading
import RPi.GPIO as GPIO
#from GPIOEmulator.EmulatorGUI import GPIO
import time
import requests
import json
import logging
API_URL = 'http://192.168.1.1'
class Led(object):
def __init__(self, gpio):
self.gpio = gpio
self._stop = None
self.blinking = False
def blink(self):
if self.blinking:
return
self.blinking = True
self._stop = threading.Event()
threading.Thread(target=self._blink).start()
def _blink(self):
out = True
while not self._stop.is_set():
GPIO.output(self.gpio, int(out))
out = not out
self._stop.wait(0.3)
def _blink_stop(self):
self.blinking = False
if self._stop:
self._stop.set()
def set(self, on):
if on: self.on()
else: self.off()
def on(self):
self._blink_stop()
GPIO.output(self.gpio, 1)
def off(self):
self._blink_stop()
GPIO.output(self.gpio, 0)
# --- Panel wiring (BCM pin numbers) ---
BTN_KEY = 23

LED_ON_GPIO = 7
LED_ON = Led(LED_ON_GPIO)

BTN_DEMO = 10
LED_DEMO_GPIO = 25
LED_DEMO = Led(LED_DEMO_GPIO)

BTN_CALL = 17

BTN_START = 27
LED_START_GPIO = 8
LED_START = Led(LED_START_GPIO)

BTN_STOP = 22

# setup GPIO for remote
GPIO.setmode(GPIO.BCM)
# All buttons are pulled up, so a pressed button reads 0 (active low).
GPIO.setup(BTN_KEY, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(BTN_DEMO, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(BTN_CALL, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(BTN_START, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(BTN_STOP, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(LED_ON_GPIO, GPIO.OUT)
GPIO.setup(LED_DEMO_GPIO, GPIO.OUT)
GPIO.setup(LED_START_GPIO, GPIO.OUT)
# All LEDs start dark.
GPIO.output(LED_ON_GPIO, 0)
GPIO.output(LED_DEMO_GPIO, 0)
GPIO.output(LED_START_GPIO, 0)

# File logger for the daemon.
logger = logging.getLogger("Gestion'air Remote")
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler = logging.FileHandler("remote.log")
handler.setFormatter(formatter)
logger.addHandler(handler)

logger.info("Starting the Gestion'air Remote daemon...")
def effect_start():
    # Power-on lamp test: chase ON -> DEMO -> START five times, then leave
    # the ON lamp lit.
    # NOTE(review): indentation reconstructed — the final two outputs are
    # placed after the loop (each iteration already clears the previous
    # START state on entry); confirm against the original file.
    for i in range(1, 6):
        GPIO.output(LED_START_GPIO, 0)
        GPIO.output(LED_ON_GPIO, 1)
        time.sleep(0.2)
        GPIO.output(LED_ON_GPIO, 0)
        GPIO.output(LED_DEMO_GPIO, 1)
        time.sleep(0.2)
        GPIO.output(LED_DEMO_GPIO, 0)
        GPIO.output(LED_START_GPIO, 1)
        time.sleep(0.2)
    GPIO.output(LED_START_GPIO, 0)
    GPIO.output(LED_ON_GPIO, 1)
def key_event(channel):
    """Edge handler for the key switch (both edges).

    Turning the key ON (line low) runs the LED self-test chase and leaves
    the ON LED lit; turning it OFF extinguishes the status LEDs.
    """
    armed = GPIO.input(BTN_KEY) == 0
    if armed:
        effect_start()
        LED_ON.on()
    else:
        # OFF: open question from the original author — should this also
        # lock the dashboard or stop the simulator?
        LED_ON.off()
        LED_DEMO.off()
GPIO.add_event_detect(BTN_KEY, GPIO.BOTH, callback=key_event)
def start_event(channel):
    """START button: ask the API to start the game, then blink the START
    LED until the status poll in the main loop reflects the real state."""
    try:
        # Timeout is essential: requests.get() never times out by default,
        # which would hang this GPIO callback thread indefinitely.  A failed
        # request must not kill the callback either — log and carry on.
        requests.get(API_URL + '/game/start', timeout=5)
    except Exception:
        logger.exception("start request failed")
    LED_START.blink()
GPIO.add_event_detect(BTN_START, GPIO.FALLING, callback=start_event, bouncetime=500)
def stop_event(channel):
    """STOP button: ask the API to stop the game.

    NOTE(review): blinking LED_START here (instead of turning it off)
    matches the original behavior — the main-loop status poll settles the
    final LED state; confirm this is intended.
    """
    try:
        # No default timeout in requests — without one a dead server would
        # hang this callback thread forever.
        requests.get(API_URL + '/game/stop', timeout=5)
    except Exception:
        logger.exception("stop request failed")
    LED_START.blink()
GPIO.add_event_detect(BTN_STOP, GPIO.FALLING, callback=stop_event, bouncetime=500)
def call_event(channel):
    """CALL button: trigger the 'call' sound on the simulator."""
    try:
        # Bounded timeout + logged failure so a network hiccup neither
        # hangs nor silently kills this callback thread.
        requests.get(API_URL + '/game/api/play-sound/call', timeout=5)
    except Exception:
        logger.exception("call sound request failed")
GPIO.add_event_detect(BTN_CALL, GPIO.FALLING, callback=call_event, bouncetime=500)
def demo_event(channel):
    """DEMO button: place the demo call (extension 1201) and blink the
    DEMO LED until the status poll updates it."""
    try:
        # Bounded timeout + logged failure; see start_event for rationale.
        requests.get(API_URL + '/game/api/call/1201', timeout=5)
    except Exception:
        logger.exception("demo call request failed")
    LED_DEMO.blink()
GPIO.add_event_detect(BTN_DEMO, GPIO.FALLING, callback=demo_event, bouncetime=500)
try:
    # Main loop: once per second, if the key switch is armed, poll the
    # simulator status and mirror it onto the LEDs.
    while True:
        if GPIO.input(BTN_KEY) == 0:
            try:
                # Timeout so a dead server cannot stall the loop forever.
                res = requests.get(API_URL + '/game/api/status',
                                   timeout=5).json()
                LED_ON.on()
            except Exception:
                # BUGFIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit — Ctrl-C during the HTTP
                # call was eaten instead of stopping the daemon.
                logger.exception("status poll failed")
                res = {
                    'isRunning': False,
                    'demoState': 'ERROR',
                }
                LED_ON.blink()  # blinking ON LED = simulator unreachable
            LED_START.set(res['isRunning'])
            # on indicate ready for demo, blinking during ringing, off during answer
            if res['demoState'] == 'FREE':
                LED_DEMO.on()
            elif res['demoState'] == 'RINGING':
                LED_DEMO.blink()
            else:
                LED_DEMO.off()
        # some sleep
        time.sleep(1)
finally:
    logger.info("Terminating the Gestion'air Remote daemon...")
    GPIO.cleanup()
|
Stavitsky/nova | refs/heads/master | nova/api/openstack/compute/plugins/v3/image_size.py | 36 | # Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
# Extension alias; also names the policy rule checked by `authorize`.
ALIAS = "image-size"
# Soft authorizer: returns False (rather than raising) when policy denies,
# so the extension simply skips adding the size field.
authorize = extensions.os_compute_soft_authorizer(ALIAS)
class ImageSizeController(wsgi.Controller):
    """Injects each image's byte size into show/detail API responses."""

    def _extend_image(self, image, image_cache):
        # Copy the size from the cached DB record into the response dict
        # under the extension's namespaced key.
        image["OS-EXT-IMG-SIZE:size"] = image_cache['size']

    @wsgi.extends
    def show(self, req, resp_obj, id):
        """Extend a single-image response with its size."""
        context = req.environ["nova.context"]
        if not authorize(context):
            return
        image = resp_obj.obj['image']
        # image guaranteed to be in the cache due to the core API adding
        # it in its 'show' method
        cached = req.get_db_item('images', image['id'])
        self._extend_image(image, cached)

    @wsgi.extends
    def detail(self, req, resp_obj):
        """Extend every image in a detail listing with its size."""
        context = req.environ['nova.context']
        if not authorize(context):
            return
        # images guaranteed to be in the cache due to the core API adding
        # it in its 'detail' method
        for image in list(resp_obj.obj['images']):
            cached = req.get_db_item('images', image['id'])
            self._extend_image(image, cached)
class ImageSize(extensions.V3APIExtensionBase):
    """Adds image size to image listings."""
    name = "ImageSize"
    alias = ALIAS
    version = 1

    def get_controller_extensions(self):
        """Attach the size-extending controller to the 'images' resource."""
        extension = extensions.ControllerExtension(
            self, 'images', ImageSizeController())
        return [extension]

    def get_resources(self):
        """This extension adds no new top-level resources."""
        return []
|
yokose-ks/edx-platform | refs/heads/gacco3/master | common/lib/xmodule/xmodule/templates.py | 231 | """
This module handles loading xmodule templates
These templates are used by the CMS to provide content that overrides xmodule defaults for
samples.
``Template``s are defined in x_module. They contain 2 attributes:
:metadata: A dictionary with the template metadata
:data: A JSON value that defines the template content
"""
# should this move to cms since it's really only for module crud?
import logging
from collections import defaultdict
from xblock.core import XBlock
log = logging.getLogger(__name__)
def all_templates():
    """
    Returns all templates for enabled modules, grouped by descriptor type
    """
    # TODO use memcache to memoize w/ expiration
    # defaultdict(list) is kept so callers looking up a category that was
    # never populated get [] instead of a KeyError.
    templates = defaultdict(list)
    for category, descriptor in XBlock.load_classes():
        # Only descriptors that opt in by providing a templates() hook.
        if hasattr(descriptor, 'templates'):
            templates[category] = descriptor.templates()
    return templates
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.