| repo_name | ref | path | copies | content |
|---|---|---|---|---|
Artemkaaas/indy-sdk
|
refs/heads/master
|
samples/python/src/anoncreds_revocation.py
|
2
|
import time
from indy import anoncreds, wallet
import json
import logging
from indy import blob_storage
from indy import pool
from src.utils import run_coroutine, path_home, PROTOCOL_VERSION
logger = logging.getLogger(__name__)
async def demo():
logger.info("Anoncreds Revocation sample -> started")
issuer = {
'did': 'NcYxiDXkpYi6ov5FcYDi1e',
'wallet_config': json.dumps({'id': 'issuer_wallet'}),
'wallet_credentials': json.dumps({'key': 'issuer_wallet_key'})
}
prover = {
'did': 'VsKV7grR1BUE29mG2Fm2kX',
'wallet_config': json.dumps({"id": "prover_wallet"}),
'wallet_credentials': json.dumps({"key": "issuer_wallet_key"})
}
verifier = {}
store = {}
# Set protocol version 2 to work with Indy Node 1.4
await pool.set_protocol_version(PROTOCOL_VERSION)
# 1. Create Issuer Wallet and Get Wallet Handle
await wallet.create_wallet(issuer['wallet_config'], issuer['wallet_credentials'])
issuer['wallet'] = await wallet.open_wallet(issuer['wallet_config'], issuer['wallet_credentials'])
# 2. Create Prover Wallet and Get Wallet Handle
await wallet.create_wallet(prover['wallet_config'], prover['wallet_credentials'])
prover['wallet'] = await wallet.open_wallet(prover['wallet_config'], prover['wallet_credentials'])
# 3. Issuer create Credential Schema
schema = {
'name': 'gvt',
'version': '1.0',
'attributes': '["age", "sex", "height", "name"]'
}
issuer['schema_id'], issuer['schema'] = await anoncreds.issuer_create_schema(issuer['did'], schema['name'],
schema['version'],
schema['attributes'])
store[issuer['schema_id']] = issuer['schema']
# 4. Issuer create Credential Definition for Schema
cred_def = {
'tag': 'cred_def_tag',
'type': 'CL',
'config': json.dumps({"support_revocation": True})
}
issuer['cred_def_id'], issuer['cred_def'] = await anoncreds.issuer_create_and_store_credential_def(
issuer['wallet'], issuer['did'], issuer['schema'], cred_def['tag'], cred_def['type'], cred_def['config'])
store[issuer['cred_def_id']] = issuer['cred_def']
# 5. Issuer create Revocation Registry
issuer['tails_writer_config'] = json.dumps({'base_dir': str(path_home().joinpath("tails")), 'uri_pattern': ''})
issuer['tails_writer'] = await blob_storage.open_writer('default', issuer['tails_writer_config'])
revoc_reg_def = {
'tag': 'cred_def_tag',
'config': json.dumps({"max_cred_num": 5, 'issuance_type': 'ISSUANCE_ON_DEMAND'})
}
(issuer['rev_reg_id'], issuer['rev_reg_def'], issuer['rev_reg_entry']) = \
await anoncreds.issuer_create_and_store_revoc_reg(issuer['wallet'], issuer['did'], None, revoc_reg_def['tag'],
issuer['cred_def_id'], revoc_reg_def['config'],
issuer['tails_writer'])
store[issuer['rev_reg_id']] = {
'definition': issuer['rev_reg_def'],
'value': issuer['rev_reg_entry']
}
# 6. Prover create Master Secret
prover['master_secret_id'] = await anoncreds.prover_create_master_secret(prover['wallet'], None)
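# Note: the master secret (also called a link secret) never leaves the prover's
# wallet; passing None as the id lets libindy generate a fresh secret, and only
# its generated id is returned for use in credential requests and proofs.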
# 7. Issuer create Credential Offer
issuer['cred_offer'] = await anoncreds.issuer_create_credential_offer(issuer['wallet'], issuer['cred_def_id'])
prover['cred_offer'] = issuer['cred_offer']
cred_offer = json.loads(prover['cred_offer'])
prover['cred_def_id'] = cred_offer['cred_def_id']
prover['schema_id'] = cred_offer['schema_id']
prover['cred_def'] = store[prover['cred_def_id']]
prover['schema'] = store[prover['schema_id']]
# 8. Prover create Credential Request
prover['cred_req'], prover['cred_req_metadata'] = \
await anoncreds.prover_create_credential_req(prover['wallet'], prover['did'], prover['cred_offer'],
prover['cred_def'], prover['master_secret_id'])
# 9. Issuer open Tails reader
issuer['blob_storage_reader'] = await blob_storage.open_reader('default', issuer['tails_writer_config'])
# 10. Issuer create Credential
prover['cred_values'] = json.dumps({
"sex": {"raw": "male", "encoded": "5944657099558967239210949258394887428692050081607692519917050011144233"},
"name": {"raw": "Alex", "encoded": "1139481716457488690172217916278103335"},
"height": {"raw": "175", "encoded": "175"},
"age": {"raw": "28", "encoded": "28"}
})
issuer['cred_values'] = prover['cred_values']
issuer['cred_req'] = prover['cred_req']
(cred_json, rev_id, rev_reg_delta_json) = \
await anoncreds.issuer_create_credential(issuer['wallet'], issuer['cred_offer'], issuer['cred_req'],
issuer['cred_values'], issuer['rev_reg_id'],
issuer['blob_storage_reader'])
issuer['rev_id'] = rev_id
store[issuer['rev_reg_id']]['delta'] = rev_reg_delta_json
prover['cred'] = cred_json
# 11. Prover store Credential
cred = json.loads(prover['cred'])
prover['rev_reg_id'] = cred['rev_reg_id']
prover['rev_reg_def'] = store[prover['rev_reg_id']]['definition']
prover['rev_reg_delta'] = store[prover['rev_reg_id']]['delta']
await anoncreds.prover_store_credential(prover['wallet'], None, prover['cred_req_metadata'],
prover['cred'], prover['cred_def'], prover['rev_reg_def'])
# 12. Verifier builds Proof Request
nonce = await anoncreds.generate_nonce()
timestamp = int(time.time())
verifier['proof_req'] = json.dumps({
'nonce': nonce,
'name': 'proof_req_1',
'version': '0.1',
'requested_attributes': {
'attr1_referent': {'name': 'name'}
},
'requested_predicates': {
'predicate1_referent': {'name': 'age', 'p_type': '>=', 'p_value': 18}
},
"non_revoked": {"to": timestamp}
})
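# The "non_revoked" interval asks the prover to show the credential was not
# revoked as of `timestamp`; the prover satisfies it below by building a
# revocation state against the registry delta for that same timestamp.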
prover['proof_req'] = verifier['proof_req']
# Prover gets Credentials for Proof Request
prover['cred_search_handle'] = \
await anoncreds.prover_search_credentials_for_proof_req(prover['wallet'], prover['proof_req'], None)
# Prover gets Credentials for attr1_referent
creds_for_attr1 = await anoncreds.prover_fetch_credentials_for_proof_req(prover['cred_search_handle'],
'attr1_referent', 10)
prover['cred_for_attr1'] = json.loads(creds_for_attr1)[0]['cred_info']
# Prover gets Credentials for predicate1_referent
creds_for_predicate1 = await anoncreds.prover_fetch_credentials_for_proof_req(prover['cred_search_handle'],
'predicate1_referent', 10)
prover['cred_for_predicate1'] = json.loads(creds_for_predicate1)[0]['cred_info']
await anoncreds.prover_close_credentials_search_for_proof_req(prover['cred_search_handle'])
# 13. Prover creates revocation state
prover['tails_reader_config'] = json.dumps({'base_dir': str(path_home().joinpath("tails")), 'uri_pattern': ''})
prover['blob_storage_reader'] = await blob_storage.open_reader('default', prover['tails_reader_config'])
rev_state_json = await anoncreds.create_revocation_state(prover['blob_storage_reader'], prover['rev_reg_def'],
prover['rev_reg_delta'], timestamp,
prover['cred_for_attr1']['cred_rev_id'])
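# The timestamp used to build this revocation state must be echoed both in
# requested_creds and as the key of revoc_states_json below, so the verifier
# can match the proof against the registry values at that point in time.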
# 14. Prover create Proof for Proof Request
prover['requested_creds'] = json.dumps({
'self_attested_attributes': {},
'requested_attributes': {'attr1_referent': {
'cred_id': prover['cred_for_attr1']['referent'], 'revealed': True, 'timestamp': timestamp}
},
'requested_predicates': {
'predicate1_referent': {'cred_id': prover['cred_for_predicate1']['referent'], 'timestamp': timestamp}
}
})
schemas_json = json.dumps({prover['schema_id']: json.loads(prover['schema'])})
cred_defs_json = json.dumps({prover['cred_def_id']: json.loads(prover['cred_def'])})
revoc_states_json = json.dumps({prover['rev_reg_id']: {timestamp: json.loads(rev_state_json)}})
prover['proof'] = \
await anoncreds.prover_create_proof(prover['wallet'], prover['proof_req'], prover['requested_creds'],
prover['master_secret_id'], schemas_json, cred_defs_json, revoc_states_json)
verifier['proof'] = prover['proof']
# 15. Verifier verify proof
proof = json.loads(verifier['proof'])
assert 'Alex' == proof['requested_proof']['revealed_attrs']['attr1_referent']['raw']
identifier = proof['identifiers'][0]
verifier['cred_def_id'] = identifier['cred_def_id']
verifier['schema_id'] = identifier['schema_id']
verifier['rev_reg_id'] = identifier['rev_reg_id']
verifier['cred_def'] = store[verifier['cred_def_id']]
verifier['schema'] = store[verifier['schema_id']]
verifier['rev_reg_def'] = store[verifier['rev_reg_id']]['definition']
verifier['rev_reg_value'] = store[verifier['rev_reg_id']]['delta']
schemas_json = json.dumps({verifier['schema_id']: json.loads(verifier['schema'])})
cred_defs_json = json.dumps({verifier['cred_def_id']: json.loads(verifier['cred_def'])})
revoc_ref_defs_json = json.dumps({verifier['rev_reg_id']: json.loads(verifier['rev_reg_def'])})
revoc_regs_json = json.dumps({verifier['rev_reg_id']: {timestamp: json.loads(verifier['rev_reg_value'])}})
assert await anoncreds.verifier_verify_proof(verifier['proof_req'], verifier['proof'], schemas_json, cred_defs_json,
revoc_ref_defs_json, revoc_regs_json)
# 16. Close and delete Issuer wallet
await wallet.close_wallet(issuer['wallet'])
await wallet.delete_wallet(issuer['wallet_config'], issuer['wallet_credentials'])
# 17. Close and delete Prover wallet
await wallet.close_wallet(prover['wallet'])
await wallet.delete_wallet(prover['wallet_config'], prover['wallet_credentials'])
logger.info("Anoncreds Revocation sample -> completed")
if __name__ == '__main__':
run_coroutine(demo)
time.sleep(1) # FIXME waiting for libindy thread complete
|
ruslan2k/public-files
|
refs/heads/master
|
python/05-pinax/mysite/resources/views.py
|
1
|
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.contrib.auth import authenticate
from django.contrib.auth.models import Group
from django.contrib.auth.decorators import login_required
from resources.models import Resource, Item, Profile
import pprint as pp
import account.views
from .forms import ResourceForm, ItemForm, DelItemForm
#from django.contrib.auth.models import Group
class SignupView(account.views.SignupView):
def after_signup(self, form):
self.update_profile(form)
super(SignupView, self).after_signup(form)
def update_profile(self, form):
pp.pprint("update_profile")
pp.pprint(self.created_user.__dict__)
#pp.pprint(self.created_user.smart_user)
#pp.pprint(self.created_user.smartuser)
# profile =
#smartuser = self.created_user.smartuser
#smartuser.salt = "bla-bla-bla"
#smartuser.save()
class LoginView(account.views.LoginView):
def after_login(self, form):
pp.pprint("after_login")
#pp.pprint(form.cleaned_data["password"])
self.update_session(form)
super(LoginView, self).after_login(form)
def update_session(self, form):
smart_user = self.login_user
pp.pprint(smart_user)
@login_required(login_url='/accounts/login/')
def index(request):
if 'test' not in request.session:
request.session['test'] = 10
else:
request.session['test'] = request.session['test'] + 1
pp.pprint(request.session['test'])
#resources = Resource.objects.all()
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = ResourceForm(request.POST)
# check whether it's valid:
if form.is_valid():
pp.pprint(form.cleaned_data)
resource = Resource(name=form.cleaned_data["resource_name"], user_id=request.user.id)
resource.save()
return HttpResponseRedirect('/resources')
else:
form = ResourceForm()
resources = request.user.resource_set.all()
context = {"resources": resources, "form": form}
return render(request, "resources/index.html", context)
@login_required(login_url='/accounts/login/')
def detail(request, resource_id):
resource = get_object_or_404(Resource, pk=resource_id)
if request.method == 'POST':
form = ItemForm(request.POST)
if form.is_valid():
item = Item(key=form.cleaned_data["item_key"], val=form.cleaned_data["item_val"],
resource_id=resource_id)
item.save()
return HttpResponseRedirect("/resources/%s/" % resource_id)
else:
form = ItemForm()
context = {"resource": resource, "form": form}
return render(request, "resources/detail.html", context)
@login_required(login_url='/accounts/login/')
def delete_item(request, resource_id, item_id):
resource = get_object_or_404(Resource, pk=resource_id)
item = get_object_or_404(Item, pk=item_id)
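# Guard clause: only the resource owner may delete, and the item must actually
# belong to this resource; any mismatch is treated as "not found".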
if resource.user_id != request.user.id or resource.id != item.resource_id:
raise Http404
if request.method == 'POST':
form = DelItemForm(request.POST)
if form.is_valid():
item.delete()
return HttpResponseRedirect("/resources/%s/" % resource_id)
else:
form = DelItemForm()
context = {"form": form}
return render(request, "items/delete.html", context)
|
izonder/intellij-community
|
refs/heads/master
|
python/testData/refactoring/pushdown/multiFileImports/shared_module.py
|
160
|
MODULE_CONSTANT = 42
def module_function():
pass
class ModuleClass(object):
pass
|
thaumos/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/azure/azure_rm_routetable_facts.py
|
14
|
#!/usr/bin/python
#
# Copyright (c) 2018 Yuwei Zhou, <yuwzho@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_routetable_facts
version_added: "2.7"
short_description: Get route table facts.
description:
- Get facts for a specific route table or all route tables in a resource group or subscription.
options:
name:
description:
- Limit results to a specific route table.
resource_group:
description:
- Limit results in a specific resource group.
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
author:
- "Yuwei Zhou (@yuwzho)"
'''
EXAMPLES = '''
- name: Get facts for one route table
azure_rm_routetable_facts:
name: Testing
resource_group: myResourceGroup
- name: Get facts for all route tables
azure_rm_routetable_facts:
resource_group: myResourceGroup
- name: Get facts by tags
azure_rm_routetable_facts:
tags:
- testing
- foo:bar
'''
RETURN = '''
id:
description: Resource id.
returned: success
type: str
name:
description: Name of the resource.
returned: success
type: str
resource_group:
description: Resource group of the route table.
returned: success
type: str
disable_bgp_route_propagation:
description: Whether the routes learned by BGP on that route table are disabled.
returned: success
type: bool
tags:
description: Tags of the route table.
returned: success
type: list
routes:
description: Current routes of the route table.
returned: success
type: list
sample: [
{
"id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/routeTables/foobar/routes/route",
"name": "route",
"resource_group": "Testing",
"routeTables": "foobar",
"address_prefix": "192.0.0.1",
"next_hop_type": "virtual_networkGateway"
}
]
'''
try:
from msrestazure.azure_exceptions import CloudError
except Exception:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, azure_id_to_dict
from ansible.module_utils.common.dict_transformations import _camel_to_snake
def route_to_dict(route):
id_dict = azure_id_to_dict(route.id)
return dict(
id=route.id,
name=route.name,
resource_group=id_dict.get('resourceGroups'),
route_table_name=id_dict.get('routeTables'),
address_prefix=route.address_prefix,
next_hop_type=_camel_to_snake(route.next_hop_type),
next_hop_ip_address=route.next_hop_ip_address
)
def instance_to_dict(table):
return dict(
id=table.id,
name=table.name,
resource_group=azure_id_to_dict(table.id).get('resourceGroups'),
location=table.location,
routes=[route_to_dict(i) for i in table.routes] if table.routes else [],
disable_bgp_route_propagation=table.disable_bgp_route_propagation,
tags=table.tags
)
class AzureRMRouteTableFacts(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
name=dict(type='str'),
resource_group=dict(type='str'),
tags=dict(type='list')
)
self.results = dict(
changed=False,
route_tables=[]
)
self.name = None
self.resource_group = None
self.tags = None
super(AzureRMRouteTableFacts, self).__init__(self.module_arg_spec,
supports_tags=False,
facts_module=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
response = []
if self.name:
response = self.get_item()
elif self.resource_group:
response = self.list_items()
else:
response = self.list_all_items()
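# has_tags() comes from AzureRMModuleBase: it keeps only those route tables
# whose Azure tags match every requested 'key' or 'key:value' entry.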
self.results['route_tables'] = [instance_to_dict(x) for x in response if self.has_tags(x.tags, self.tags)]
return self.results
def get_item(self):
self.log('Get route table for {0}-{1}'.format(self.resource_group, self.name))
try:
item = self.network_client.route_tables.get(self.resource_group, self.name)
return [item]
except CloudError:
pass
return []
def list_items(self):
self.log('List all items in resource group')
try:
return self.network_client.route_tables.list(self.resource_group)
except CloudError as exc:
self.fail("Failed to list items - {0}".format(str(exc)))
return []
def list_all_items(self):
self.log("List all items in subscription")
try:
return self.network_client.route_tables.list_all()
except CloudError as exc:
self.fail("Failed to list all items - {0}".format(str(exc)))
return []
def main():
AzureRMRouteTableFacts()
if __name__ == '__main__':
main()
|
acsone/odoo
|
refs/heads/8.0
|
addons/mail/mail_thread.py
|
27
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import base64
from collections import OrderedDict
import datetime
import dateutil
import email
try:
import simplejson as json
except ImportError:
import json
from lxml import etree
import logging
import pytz
import re
import socket
import time
import xmlrpclib
from email.message import Message
from email.utils import formataddr
from urllib import urlencode
from openerp import api, tools
from openerp import SUPERUSER_ID
from openerp.addons.mail.mail_message import decode
from openerp.osv import fields, osv, orm
from openerp.osv.orm import BaseModel
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
mail_header_msgid_re = re.compile('<[^<>]+>')
def decode_header(message, header, separator=' '):
return separator.join(map(decode, filter(None, message.get_all(header, []))))
class mail_thread(osv.AbstractModel):
''' mail_thread model is meant to be inherited by any model that needs to
act as a discussion topic on which messages can be attached. Public
methods are prefixed with ``message_`` in order to avoid name
collisions with methods of the models that will inherit from this class.
``mail.thread`` defines fields used to handle and display the
communication history. ``mail.thread`` also manages followers of
inheriting classes. All features and expected behavior are managed
by mail.thread. Widgets have been designed for the 7.0 and following
versions of OpenERP.
Inheriting classes are not required to implement any method, as the
default implementation will work for any model. However it is common
to override at least the ``message_new`` and ``message_update``
methods (calling ``super``) to add model-specific behavior at
creation and update of a thread when processing incoming emails.
Options:
- _mail_flat_thread: if set to True, all messages without parent_id
are automatically attached to the first message posted on the
resource. If set to False, the display of Chatter is done using
threads, and no parent_id is automatically set.
'''
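# A minimal sketch of the common override pattern described above (hypothetical
# model and field names; calling super() keeps the default behaviour):
#
#     class my_ticket(osv.Model):
#         _name = 'my.ticket'
#         _inherit = ['mail.thread']
#
#         def message_new(self, cr, uid, msg_dict, custom_values=None, context=None):
#             # seed the new record from the incoming email before creation
#             defaults = {'name': msg_dict.get('subject') or _('No Subject')}
#             defaults.update(custom_values or {})
#             return super(my_ticket, self).message_new(
#                 cr, uid, msg_dict, custom_values=defaults, context=context)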
_name = 'mail.thread'
_description = 'Email Thread'
_mail_flat_thread = True
_mail_post_access = 'write'
# Automatic logging system if mail installed
# _track = {
# 'field': {
# 'module.subtype_xml': lambda self, cr, uid, obj, context=None: obj.state == 'done',
# 'module.subtype_xml2': lambda self, cr, uid, obj, context=None: obj.state != 'done',
# },
# 'field2': {
# ...
# },
# }
# where
# :param string field: field name
# :param module.subtype_xml: xml_id of a mail.message.subtype (i.e. mail.mt_comment)
# :param obj: is a browse_record
# :param function lambda: returns whether the tracking should record using this subtype
_track = {}
# Mass mailing feature
_mail_mass_mailing = False
def get_empty_list_help(self, cr, uid, help, context=None):
""" Override of BaseModel.get_empty_list_help() to generate an help message
that adds alias information. """
model = context.get('empty_list_help_model')
res_id = context.get('empty_list_help_id')
ir_config_parameter = self.pool.get("ir.config_parameter")
catchall_domain = ir_config_parameter.get_param(cr, SUPERUSER_ID, "mail.catchall.domain", context=context)
document_name = context.get('empty_list_help_document_name', _('document'))
alias = None
if catchall_domain and model and res_id: # specific res_id -> find its alias (i.e. section_id specified)
object_id = self.pool.get(model).browse(cr, uid, res_id, context=context)
# check that the alias effectively creates new records
if object_id.alias_id and object_id.alias_id.alias_name and \
object_id.alias_id.alias_model_id and \
object_id.alias_id.alias_model_id.model == self._name and \
object_id.alias_id.alias_force_thread_id == 0:
alias = object_id.alias_id
if not alias and catchall_domain and model: # no res_id or res_id not linked to an alias -> generic help message, take a generic alias of the model
alias_obj = self.pool.get('mail.alias')
alias_ids = alias_obj.search(cr, uid, [("alias_parent_model_id.model", "=", model), ("alias_name", "!=", False), ('alias_force_thread_id', '=', False), ('alias_parent_thread_id', '=', False)], context=context, order='id ASC')
if alias_ids and len(alias_ids) == 1:
alias = alias_obj.browse(cr, uid, alias_ids[0], context=context)
add_arrow = not help or help.find("oe_view_nocontent_create") == -1
if alias:
email_link = "<a href='mailto:%(email)s'>%(email)s</a>" % {'email': alias.name_get()[0][1]}
if add_arrow:
return _("""<p class='oe_view_nocontent_create'>
Click here to add new %(document)s or send an email to: %(email)s.
</p>
%(static_help)s"""
) % {
'document': document_name, 'email': email_link, 'static_help': help or ''
}
return _("""%(static_help)s
<p>
You could also add a new %(document)s by sending an email to: %(email)s.
</p>""") % {
'document': document_name, 'email': email_link, 'static_help': help or ''
}
if add_arrow:
return _("<p class='oe_view_nocontent_create'>Click here to add new %(document)s</p>%(static_help)s") % {
'document': document_name, 'static_help': help or ''
}
return help
def _get_message_data(self, cr, uid, ids, name, args, context=None):
""" Computes:
- message_unread: has uid unread message for the document
- message_summary: html snippet summarizing the Chatter for kanban views """
res = dict((id, dict(message_unread=False, message_unread_count=0, message_summary=' ')) for id in ids)
user_pid = self.pool.get('res.users').read(cr, uid, [uid], ['partner_id'], context=context)[0]['partner_id'][0]
# search for unread messages directly in SQL to improve performance
cr.execute(""" SELECT m.res_id FROM mail_message m
RIGHT JOIN mail_notification n
ON (n.message_id = m.id AND n.partner_id = %s AND (n.is_read = False or n.is_read IS NULL))
WHERE m.model = %s AND m.res_id in %s""",
(user_pid, self._name, tuple(ids),))
for result in cr.fetchall():
res[result[0]]['message_unread'] = True
res[result[0]]['message_unread_count'] += 1
for id in ids:
if res[id]['message_unread_count']:
title = res[id]['message_unread_count'] > 1 and _("You have %d unread messages") % res[id]['message_unread_count'] or _("You have one unread message")
res[id]['message_summary'] = "<span class='oe_kanban_mail_new' title='%s'><span class='oe_e'>9</span> %d %s</span>" % (title, res[id].pop('message_unread_count'), _("New"))
res[id].pop('message_unread_count', None)
return res
def read_followers_data(self, cr, uid, follower_ids, context=None):
result = []
for follower in self.pool.get('res.partner').browse(cr, uid, follower_ids, context=context):
is_editable = self.pool['res.users'].has_group(cr, uid, 'base.group_no_one')
is_uid = uid in map(lambda x: x.id, follower.user_ids)
data = (follower.id,
follower.name,
{'is_editable': is_editable, 'is_uid': is_uid},
)
result.append(data)
return result
def _get_subscription_data(self, cr, uid, ids, name, args, user_pid=None, context=None):
""" Computes:
- message_subtype_data: data about document subtypes: which are
available, which are followed if any """
res = dict((id, dict(message_subtype_data='')) for id in ids)
if user_pid is None:
user_pid = self.pool.get('res.users').read(cr, uid, [uid], ['partner_id'], context=context)[0]['partner_id'][0]
# find current model subtypes, add them to a dictionary
subtype_obj = self.pool.get('mail.message.subtype')
subtype_ids = subtype_obj.search(
cr, uid, [
'&', ('hidden', '=', False), '|', ('res_model', '=', self._name), ('res_model', '=', False)
], context=context)
subtype_dict = OrderedDict(
(subtype.name, {
'default': subtype.default,
'followed': False,
'parent_model': subtype.parent_id and subtype.parent_id.res_model or self._name,
'id': subtype.id}
) for subtype in subtype_obj.browse(cr, uid, subtype_ids, context=context))
for id in ids:
res[id]['message_subtype_data'] = subtype_dict.copy()
# find the document followers, update the data
fol_obj = self.pool.get('mail.followers')
fol_ids = fol_obj.search(cr, uid, [
('partner_id', '=', user_pid),
('res_id', 'in', ids),
('res_model', '=', self._name),
], context=context)
for fol in fol_obj.browse(cr, uid, fol_ids, context=context):
thread_subtype_dict = res[fol.res_id]['message_subtype_data']
for subtype in [st for st in fol.subtype_ids if st.name in thread_subtype_dict]:
thread_subtype_dict[subtype.name]['followed'] = True
res[fol.res_id]['message_subtype_data'] = thread_subtype_dict
return res
def _search_message_unread(self, cr, uid, obj=None, name=None, domain=None, context=None):
return [('message_ids.to_read', '=', True)]
def _get_followers(self, cr, uid, ids, name, arg, context=None):
fol_obj = self.pool.get('mail.followers')
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('res_id', 'in', ids)])
res = dict((id, dict(message_follower_ids=[], message_is_follower=False)) for id in ids)
user_pid = self.pool.get('res.users').read(cr, uid, [uid], ['partner_id'], context=context)[0]['partner_id'][0]
for fol in fol_obj.browse(cr, SUPERUSER_ID, fol_ids):
res[fol.res_id]['message_follower_ids'].append(fol.partner_id.id)
if fol.partner_id.id == user_pid:
res[fol.res_id]['message_is_follower'] = True
return res
def _set_followers(self, cr, uid, id, name, value, arg, context=None):
if not value:
return
partner_obj = self.pool.get('res.partner')
fol_obj = self.pool.get('mail.followers')
# read the old set of followers, and determine the new set of followers
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('res_id', '=', id)])
old = set(fol.partner_id.id for fol in fol_obj.browse(cr, SUPERUSER_ID, fol_ids))
new = set(old)
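# `value` follows the standard odoo x2many command protocol handled below:
# 0 = create, 1 = update, 2 = delete, 3 = unlink (keep the partner),
# 4 = link, 5 = clear all links, 6 = replace with an explicit id list.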
for command in value or []:
if isinstance(command, (int, long)):
new.add(command)
elif command[0] == 0:
new.add(partner_obj.create(cr, uid, command[2], context=context))
elif command[0] == 1:
partner_obj.write(cr, uid, [command[1]], command[2], context=context)
new.add(command[1])
elif command[0] == 2:
partner_obj.unlink(cr, uid, [command[1]], context=context)
new.discard(command[1])
elif command[0] == 3:
new.discard(command[1])
elif command[0] == 4:
new.add(command[1])
elif command[0] == 5:
new.clear()
elif command[0] == 6:
new = set(command[2])
# remove partners that are no longer followers
self.message_unsubscribe(cr, uid, [id], list(old-new), context=context)
# add new followers
self.message_subscribe(cr, uid, [id], list(new-old), context=context)
def _search_followers(self, cr, uid, obj, name, args, context):
"""Search function for message_follower_ids
Do not use with operator 'not in'. Use message_is_follower instead.
"""
fol_obj = self.pool.get('mail.followers')
res = []
for field, operator, value in args:
assert field == name
# TOFIX make it work with not in
assert operator != "not in", "Do not search message_follower_ids with 'not in'"
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('partner_id', operator, value)])
res_ids = [fol.res_id for fol in fol_obj.browse(cr, SUPERUSER_ID, fol_ids)]
res.append(('id', 'in', res_ids))
return res
def _search_is_follower(self, cr, uid, obj, name, args, context):
"""Search function for message_is_follower"""
res = []
for field, operator, value in args:
assert field == name
partner_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id.id
if (operator == '=' and value) or (operator == '!=' and not value): # is a follower
res_ids = self.search(cr, uid, [('message_follower_ids', 'in', [partner_id])], context=context)
else: # is not a follower or unknown domain
mail_ids = self.search(cr, uid, [('message_follower_ids', 'in', [partner_id])], context=context)
res_ids = self.search(cr, uid, [('id', 'not in', mail_ids)], context=context)
res.append(('id', 'in', res_ids))
return res
_columns = {
'message_is_follower': fields.function(_get_followers, type='boolean',
fnct_search=_search_is_follower, string='Is a Follower', multi='_get_followers,'),
'message_follower_ids': fields.function(_get_followers, fnct_inv=_set_followers,
fnct_search=_search_followers, type='many2many', priority=-10,
obj='res.partner', string='Followers', multi='_get_followers'),
'message_ids': fields.one2many('mail.message', 'res_id',
domain=lambda self: [('model', '=', self._name)],
auto_join=True,
string='Messages',
help="Messages and communication history"),
'message_last_post': fields.datetime('Last Message Date',
help='Date of the last message posted on the record.'),
'message_unread': fields.function(_get_message_data,
fnct_search=_search_message_unread, multi="_get_message_data",
type='boolean', string='Unread Messages',
help="If checked new messages require your attention."),
'message_summary': fields.function(_get_message_data, method=True,
type='text', string='Summary', multi="_get_message_data",
help="Holds the Chatter summary (number of messages, ...). "\
"This summary is directly in html format in order to "\
"be inserted in kanban views."),
}
def _get_user_chatter_options(self, cr, uid, context=None):
options = {
'display_log_button': False
}
is_employee = self.pool['res.users'].has_group(cr, uid, 'base.group_user')
if is_employee:
options['display_log_button'] = True
return options
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
res = super(mail_thread, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
if view_type == 'form':
doc = etree.XML(res['arch'])
for node in doc.xpath("//field[@name='message_ids']"):
options = json.loads(node.get('options', '{}'))
options.update(self._get_user_chatter_options(cr, uid, context=context))
node.set('options', json.dumps(options))
res['arch'] = etree.tostring(doc)
return res
#------------------------------------------------------
# CRUD overrides for automatic subscription and logging
#------------------------------------------------------
def create(self, cr, uid, values, context=None):
""" Chatter override :
- subscribe uid
- subscribe followers of parent
- log a creation message
"""
if context is None:
context = {}
if context.get('tracking_disable'):
return super(mail_thread, self).create(
cr, uid, values, context=context)
# subscribe uid unless asked not to
if not context.get('mail_create_nosubscribe'):
pid = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid).partner_id.id
message_follower_ids = values.get('message_follower_ids') or [] # webclient can send None or False
message_follower_ids.append([4, pid])
values['message_follower_ids'] = message_follower_ids
thread_id = super(mail_thread, self).create(cr, uid, values, context=context)
# automatic logging unless asked not to (mainly for various testing purpose)
if not context.get('mail_create_nolog'):
ir_model_pool = self.pool['ir.model']
ids = ir_model_pool.search(cr, uid, [('model', '=', self._name)], context=context)
name = ir_model_pool.read(cr, uid, ids, ['name'], context=context)[0]['name']
self.message_post(cr, uid, thread_id, body=_('%s created') % name, context=context)
# auto_subscribe: take values and defaults into account
create_values = dict(values)
for key, val in context.iteritems():
if key.startswith('default_') and key[8:] not in create_values:
create_values[key[8:]] = val
self.message_auto_subscribe(cr, uid, [thread_id], create_values.keys(), context=context, values=create_values)
# track values
track_ctx = dict(context)
if 'lang' not in track_ctx:
track_ctx['lang'] = self.pool.get('res.users').browse(cr, uid, uid, context=context).lang
if not context.get('mail_notrack'):
tracked_fields = self._get_tracked_fields(cr, uid, values.keys(), context=track_ctx)
if tracked_fields:
initial_values = {thread_id: dict.fromkeys(tracked_fields, False)}
self.message_track(cr, uid, [thread_id], tracked_fields, initial_values, context=track_ctx)
return thread_id
def write(self, cr, uid, ids, values, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if context.get('tracking_disable'):
return super(mail_thread, self).write(
cr, uid, ids, values, context=context)
# Track initial values of tracked fields
track_ctx = dict(context)
if 'lang' not in track_ctx:
track_ctx['lang'] = self.pool.get('res.users').browse(cr, uid, uid, context=context).lang
tracked_fields = None
if not context.get('mail_notrack'):
tracked_fields = self._get_tracked_fields(cr, uid, values.keys(), context=track_ctx)
if tracked_fields:
records = self.browse(cr, uid, ids, context=track_ctx)
initial_values = dict((record.id, dict((key, getattr(record, key)) for key in tracked_fields))
for record in records)
# Perform write
result = super(mail_thread, self).write(cr, uid, ids, values, context=context)
# Perform the tracking
if tracked_fields:
self.message_track(cr, uid, ids, tracked_fields, initial_values, context=track_ctx)
# update followers
self.message_auto_subscribe(cr, uid, ids, values.keys(), context=context, values=values)
return result
def unlink(self, cr, uid, ids, context=None):
""" Override unlink to delete messages and followers. This cannot be
cascaded, because link is done through (res_model, res_id). """
msg_obj = self.pool.get('mail.message')
fol_obj = self.pool.get('mail.followers')
if isinstance(ids, (int, long)):
ids = [ids]
# delete messages and notifications
msg_ids = msg_obj.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)], context=context)
msg_obj.unlink(cr, uid, msg_ids, context=context)
# delete
res = super(mail_thread, self).unlink(cr, uid, ids, context=context)
# delete followers
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('res_id', 'in', ids)], context=context)
fol_obj.unlink(cr, SUPERUSER_ID, fol_ids, context=context)
return res
def copy_data(self, cr, uid, id, default=None, context=None):
# avoid tracking multiple temporary changes during copy
context = dict(context or {}, mail_notrack=True)
return super(mail_thread, self).copy_data(cr, uid, id, default=default, context=context)
#------------------------------------------------------
# Automatically log tracked fields
#------------------------------------------------------
def _get_tracked_fields(self, cr, uid, updated_fields, context=None):
""" Return a structure of tracked fields for the current model.
:param list updated_fields: modified field names
:return dict: a dict mapping field name to description, containing
always tracked fields and modified on_change fields
"""
tracked_fields = []
for name, field in self._fields.items():
visibility = getattr(field, 'track_visibility', False)
if visibility == 'always' or (visibility == 'onchange' and name in updated_fields) or name in self._track:
tracked_fields.append(name)
if tracked_fields:
return self.fields_get(cr, uid, tracked_fields, context=context)
return {}
def message_track(self, cr, uid, ids, tracked_fields, initial_values, context=None):
def convert_for_display(value, col_info):
if not value and col_info['type'] == 'boolean':
return 'False'
if not value:
return ''
if col_info['type'] == 'many2one':
return value.name_get()[0][1]
if col_info['type'] == 'selection':
return dict(col_info['selection'])[value]
return value
def format_message(message_description, tracked_values):
message = ''
if message_description:
message = '<span>%s</span>' % message_description
for name, change in tracked_values.items():
message += '<div> • <b>%s</b>: ' % change.get('col_info')
if change.get('old_value'):
message += '%s → ' % change.get('old_value')
message += '%s</div>' % change.get('new_value')
return message
if not tracked_fields:
return True
for browse_record in self.browse(cr, uid, ids, context=context):
initial = initial_values[browse_record.id]
changes = set()
tracked_values = {}
# generate tracked_values data structure: {'col_name': {col_info, new_value, old_value}}
for col_name, col_info in tracked_fields.items():
field = self._fields[col_name]
initial_value = initial[col_name]
record_value = getattr(browse_record, col_name)
if record_value == initial_value and getattr(field, 'track_visibility', None) == 'always':
tracked_values[col_name] = dict(
col_info=col_info['string'],
new_value=convert_for_display(record_value, col_info),
)
elif record_value != initial_value and (record_value or initial_value): # because browse null != False
if getattr(field, 'track_visibility', None) in ['always', 'onchange']:
tracked_values[col_name] = dict(
col_info=col_info['string'],
old_value=convert_for_display(initial_value, col_info),
new_value=convert_for_display(record_value, col_info),
)
if col_name in tracked_fields:
changes.add(col_name)
if not changes:
continue
# find subtypes and post messages or log if no subtype found
subtypes = []
# Passing this key allows leaving the subtype empty, so no email is sent because partners_to_notify from mail_message._notify will be empty
if not context.get('mail_track_log_only'):
for field, track_info in self._track.items():
if field not in changes:
continue
for subtype, method in track_info.items():
if method(self, cr, uid, browse_record, context):
subtypes.append(subtype)
posted = False
for subtype in subtypes:
subtype_rec = self.pool.get('ir.model.data').xmlid_to_object(cr, uid, subtype, context=context)
if not (subtype_rec and subtype_rec.exists()):
_logger.debug('subtype %s not found' % subtype)
continue
message = format_message(subtype_rec.description if subtype_rec.description else subtype_rec.name, tracked_values)
self.message_post(cr, uid, browse_record.id, body=message, subtype=subtype, context=context)
posted = True
if not posted:
message = format_message('', tracked_values)
self.message_post(cr, uid, browse_record.id, body=message, context=context)
return True
#------------------------------------------------------
# mail.message wrappers and tools
#------------------------------------------------------
def _needaction_domain_get(self, cr, uid, context=None):
if self._needaction:
return [('message_unread', '=', True)]
return []
def _garbage_collect_attachments(self, cr, uid, context=None):
""" Garbage collect lost mail attachments. Those are attachments
- linked to res_model 'mail.compose.message', the composer wizard
- with res_id 0, because they were created outside of an existing
wizard (typically user input through Chatter or reports
created on-the-fly by the templates)
- unused since at least one day (create_date and write_date)
"""
limit_date = datetime.datetime.utcnow() - datetime.timedelta(days=1)
limit_date_str = datetime.datetime.strftime(limit_date, tools.DEFAULT_SERVER_DATETIME_FORMAT)
ir_attachment_obj = self.pool.get('ir.attachment')
attach_ids = ir_attachment_obj.search(cr, uid, [
('res_model', '=', 'mail.compose.message'),
('res_id', '=', 0),
('create_date', '<', limit_date_str),
('write_date', '<', limit_date_str),
], context=context)
ir_attachment_obj.unlink(cr, uid, attach_ids, context=context)
return True
@api.cr_uid_ids_context
def check_mail_message_access(self, cr, uid, mids, operation, model_obj=None, context=None):
""" mail.message check permission rules for related document. This method is
meant to be inherited in order to implement addons-specific behavior.
A common behavior would be to allow creating messages when having read
access rule on the document, for portal document such as issues. """
if not model_obj:
model_obj = self
if hasattr(self, '_mail_post_access'):
create_allow = self._mail_post_access
else:
create_allow = 'write'
if operation in ['write', 'unlink']:
check_operation = 'write'
elif operation == 'create' and create_allow in ['create', 'read', 'write', 'unlink']:
check_operation = create_allow
elif operation == 'create':
check_operation = 'write'
else:
check_operation = operation
model_obj.check_access_rights(cr, uid, check_operation)
model_obj.check_access_rule(cr, uid, mids, check_operation, context=context)
def _get_inbox_action_xml_id(self, cr, uid, context=None):
""" When redirecting towards the Inbox, choose which action xml_id has
to be fetched. This method is meant to be inherited, at least in portal
because portal users have a different Inbox action than classic users. """
return ('mail', 'action_mail_inbox_feeds')
def message_redirect_action(self, cr, uid, context=None):
""" For a given message, return an action that either
- opens the form view of the related document if model, res_id, and
read access to the document
- opens the Inbox with a default search on the conversation if model,
res_id
- opens the Inbox with context propagated
"""
if context is None:
context = {}
# default action is the Inbox action
self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context)
act_model, act_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, *self._get_inbox_action_xml_id(cr, uid, context=context))
action = self.pool.get(act_model).read(cr, uid, [act_id], [])[0]
params = context.get('params')
msg_id = model = res_id = None
if params:
msg_id = params.get('message_id')
model = params.get('model')
res_id = params.get('res_id', params.get('id')) # signup automatically generated id instead of res_id
if not msg_id and not (model and res_id):
return action
if msg_id and not (model and res_id):
msg = self.pool.get('mail.message').browse(cr, uid, msg_id, context=context)
if msg.exists():
model, res_id = msg.model, msg.res_id
# if model + res_id found: try to redirect to the document or fallback on the Inbox
if model and res_id:
model_obj = self.pool.get(model)
if model_obj.check_access_rights(cr, uid, 'read', raise_exception=False):
try:
model_obj.check_access_rule(cr, uid, [res_id], 'read', context=context)
action = model_obj.get_access_action(cr, uid, res_id, context=context)
except (osv.except_osv, orm.except_orm):
pass
action.update({
'context': {
'search_default_model': model,
'search_default_res_id': res_id,
}
})
return action
def _get_access_link(self, cr, uid, mail, partner, context=None):
# the parameters to encode for the query and fragment part of url
query = {'db': cr.dbname}
fragment = {
'login': partner.user_ids[0].login,
'action': 'mail.action_mail_redirect',
}
if mail.notification:
fragment['message_id'] = mail.mail_message_id.id
elif mail.model and mail.res_id:
fragment.update(model=mail.model, res_id=mail.res_id)
return "/web?%s#%s" % (urlencode(query), urlencode(fragment))
#------------------------------------------------------
# Email specific
#------------------------------------------------------
def message_get_default_recipients(self, cr, uid, ids, context=None):
if context and context.get('thread_model') and context['thread_model'] in self.pool and context['thread_model'] != self._name:
if hasattr(self.pool[context['thread_model']], 'message_get_default_recipients'):
sub_ctx = dict(context)
sub_ctx.pop('thread_model')
return self.pool[context['thread_model']].message_get_default_recipients(cr, uid, ids, context=sub_ctx)
res = {}
for record in self.browse(cr, SUPERUSER_ID, ids, context=context):
recipient_ids, email_to, email_cc = set(), False, False
if 'partner_id' in self._fields and record.partner_id:
recipient_ids.add(record.partner_id.id)
elif 'email_from' in self._fields and record.email_from:
email_to = record.email_from
elif 'email' in self._fields:
email_to = record.email
res[record.id] = {'partner_ids': list(recipient_ids), 'email_to': email_to, 'email_cc': email_cc}
return res
def message_get_reply_to(self, cr, uid, ids, default=None, context=None):
""" Returns the preferred reply-to email address that is basically
the alias of the document, if it exists. """
if context is None:
context = {}
model_name = context.get('thread_model') or self._name
alias_domain = self.pool['ir.config_parameter'].get_param(cr, uid, "mail.catchall.domain", context=context)
res = dict.fromkeys(ids, False)
# alias domain: check for aliases and catchall
aliases = {}
doc_names = {}
if alias_domain:
if model_name and model_name != 'mail.thread':
alias_ids = self.pool['mail.alias'].search(
cr, SUPERUSER_ID, [
('alias_parent_model_id.model', '=', model_name),
('alias_parent_thread_id', 'in', ids),
('alias_name', '!=', False)
], context=context)
aliases.update(
dict((alias.alias_parent_thread_id, '%s@%s' % (alias.alias_name, alias_domain))
for alias in self.pool['mail.alias'].browse(cr, SUPERUSER_ID, alias_ids, context=context)))
doc_names.update(
dict((ng_res[0], ng_res[1])
for ng_res in self.pool[model_name].name_get(cr, SUPERUSER_ID, aliases.keys(), context=context)))
# left ids: use catchall
left_ids = set(ids).difference(set(aliases.keys()))
if left_ids:
catchall_alias = self.pool['ir.config_parameter'].get_param(cr, uid, "mail.catchall.alias", context=context)
if catchall_alias:
aliases.update(dict((res_id, '%s@%s' % (catchall_alias, alias_domain)) for res_id in left_ids))
# compute name of reply-to
company_name = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).company_id.name
for res_id in aliases.keys():
email_name = '%s%s' % (company_name, doc_names.get(res_id) and (' ' + doc_names[res_id]) or '')
email_addr = aliases[res_id]
res[res_id] = formataddr((email_name, email_addr))
left_ids = set(ids).difference(set(aliases.keys()))
if left_ids and default:
res.update(dict((res_id, default) for res_id in left_ids))
return res
def message_get_email_values(self, cr, uid, id, notif_mail=None, context=None):
""" Get specific notification email values to store on the notification
mail_mail. Void method, inherit it to add custom values. """
res = dict()
return res
def message_get_recipient_values(self, cr, uid, id, notif_message=None, recipient_ids=None, context=None):
""" Get specific notification recipient values to store on the notification
mail_mail. Basic method just set the recipient partners as mail_mail
recipients. Inherit this method to add custom behavior like using
recipient email_to to bypass the recipient_ids heuristics in the
mail sending mechanism. """
return {
'recipient_ids': [(4, pid) for pid in recipient_ids]
}
#------------------------------------------------------
# Mail gateway
#------------------------------------------------------
def message_capable_models(self, cr, uid, context=None):
""" Used by the plugin addon, based for plugin_outlook and others. """
ret_dict = {}
for model_name in self.pool.obj_list():
model = self.pool[model_name]
if hasattr(model, "message_process") and hasattr(model, "message_post"):
ret_dict[model_name] = model._description
return ret_dict
def _message_find_partners(self, cr, uid, message, header_fields=['From'], context=None):
""" Find partners related to some header fields of the message.
:param string message: an email.message instance """
s = ', '.join([decode(message.get(h)) for h in header_fields if message.get(h)])
return filter(lambda x: x, self._find_partner_from_emails(cr, uid, None, tools.email_split(s), context=context))
def message_route_verify(self, cr, uid, message, message_dict, route, update_author=True, assert_model=True, create_fallback=True, allow_private=False, context=None):
""" Verify route validity. Check and rules:
1 - if thread_id -> check that document effectively exists; otherwise
fallback on a message_new by resetting thread_id
2 - check that message_update exists if thread_id is set; or at least
that message_new exist
[ - find author_id if update_author is set]
3 - if there is an alias, check alias_contact:
'followers' and thread_id:
check on target document that the author is in the followers
'followers' and alias_parent_thread_id:
check on alias parent document that the author is in the
followers
'partners': check that author_id is set
"""
assert isinstance(route, (list, tuple)), 'A route should be a list or a tuple'
assert len(route) == 5, 'A route should contain 5 elements: model, thread_id, custom_values, uid, alias record'
message_id = message.get('Message-Id')
email_from = decode_header(message, 'From')
author_id = message_dict.get('author_id')
model, thread_id, alias = route[0], route[1], route[4]
model_pool = None
def _create_bounce_email():
mail_mail = self.pool.get('mail.mail')
mail_id = mail_mail.create(cr, uid, {
'body_html': '<div><p>Hello,</p>'
'<p>The following email sent to %s cannot be accepted because this is '
'a private email address. Only allowed people can contact us at this address.</p></div>'
'<blockquote>%s</blockquote>' % (message.get('to'), message_dict.get('body')),
'subject': 'Re: %s' % message.get('subject'),
'email_to': message.get('from'),
'auto_delete': True,
}, context=context)
mail_mail.send(cr, uid, [mail_id], context=context)
def _warn(message):
_logger.warning('Routing mail with Message-Id %s: route %s: %s',
message_id, route, message)
# Wrong model
if model and model not in self.pool:
if assert_model:
assert model in self.pool, 'Routing: unknown target model %s' % model
_warn('unknown target model %s' % model)
return ()
elif model:
model_pool = self.pool[model]
# Private message: should not contain any thread_id
if not model and thread_id:
if assert_model:
if thread_id:
raise ValueError('Routing: posting a message without model should be with a null res_id (private message), received %s.' % thread_id)
_warn('posting a message without model should be with a null res_id (private message), received %s resetting thread_id' % thread_id)
thread_id = 0
# Private message: should have a parent_id (only answers)
if not model and not message_dict.get('parent_id'):
if assert_model:
if not message_dict.get('parent_id'):
raise ValueError('Routing: posting a message without model should be with a parent_id (private message).')
_warn('posting a message without model should be with a parent_id (private message), skipping')
return False
# Existing Document: check if exists; if not, fallback on create if allowed
if thread_id and not model_pool.exists(cr, uid, thread_id):
if create_fallback:
_warn('reply to missing document (%s,%s), fall back on new document creation' % (model, thread_id))
thread_id = None
elif assert_model:
assert model_pool.exists(cr, uid, thread_id), 'Routing: reply to missing document (%s,%s)' % (model, thread_id)
else:
_warn('reply to missing document (%s,%s), skipping' % (model, thread_id))
return False
# Existing Document: check model accepts the mailgateway
if thread_id and model and not hasattr(model_pool, 'message_update'):
if create_fallback:
_warn('model %s does not accept document update, fall back on document creation' % model)
thread_id = None
elif assert_model:
assert hasattr(model_pool, 'message_update'), 'Routing: model %s does not accept document update, crashing' % model
else:
_warn('model %s does not accept document update, skipping' % model)
return False
# New Document: check model accepts the mailgateway
if not thread_id and model and not hasattr(model_pool, 'message_new'):
if assert_model:
if not hasattr(model_pool, 'message_new'):
raise ValueError(
'Model %s does not accept document creation, crashing' % model
)
_warn('model %s does not accept document creation, skipping' % model)
return False
# Update message author if asked
# We do it now because we need it for aliases (contact settings)
if not author_id and update_author:
author_ids = self._find_partner_from_emails(cr, uid, thread_id, [email_from], model=model, context=context)
if author_ids:
author_id = author_ids[0]
message_dict['author_id'] = author_id
# Alias: check alias_contact settings
if alias and alias.alias_contact == 'followers' and (thread_id or alias.alias_parent_thread_id):
if thread_id:
obj = self.pool[model].browse(cr, uid, thread_id, context=context)
else:
obj = self.pool[alias.alias_parent_model_id.model].browse(cr, uid, alias.alias_parent_thread_id, context=context)
if not author_id or author_id not in [fol.id for fol in obj.message_follower_ids]:
_warn('alias %s restricted to internal followers, skipping' % alias.alias_name)
_create_bounce_email()
return False
elif alias and alias.alias_contact == 'partners' and not author_id:
_warn('alias %s does not accept unknown author, skipping' % alias.alias_name)
_create_bounce_email()
return False
if not model and not thread_id and not alias and not allow_private:
return ()
return (model, thread_id, route[2], route[3], None if context.get('drop_alias', False) else route[4])
def message_route(self, cr, uid, message, message_dict, model=None, thread_id=None,
custom_values=None, context=None):
"""Attempt to figure out the correct target model, thread_id,
custom_values and user_id to use for an incoming message.
Multiple values may be returned, if a message had multiple
recipients matching existing mail.aliases, for example.
The following heuristics are used, in this order:
1. If the message replies to an existing thread_id, and
properly contains the thread model in the 'In-Reply-To'
header, use this model/thread_id pair, and ignore
custom_value (not needed as no creation will take place)
2. Look for a mail.alias entry matching the message
recipient, and use the corresponding model, thread_id,
custom_values and user_id.
3. Fallback to the ``model``, ``thread_id`` and ``custom_values``
provided.
4. If all the above fails, raise an exception.
:param string message: an email.message instance
:param dict message_dict: dictionary holding message variables
:param string model: the fallback model to use if the message
does not match any of the currently configured mail aliases
(may be None if a matching alias is supposed to be present)
:type dict custom_values: optional dictionary of default field values
to pass to ``message_new`` if a new record needs to be created.
Ignored if the thread record already exists, and also if a
matching mail.alias was found (aliases define their own defaults)
:param int thread_id: optional ID of the record/thread from ``model``
to which this mail should be attached. Only used if the message
does not reply to an existing thread and does not match any mail alias.
:return: list of [model, thread_id, custom_values, user_id, alias]
:raises: ValueError, TypeError
"""
if not isinstance(message, Message):
raise TypeError('message must be an email.message.Message at this point')
mail_msg_obj = self.pool['mail.message']
mail_alias = self.pool.get('mail.alias')
fallback_model = model
# Get email.message.Message variables for future processing
message_id = message.get('Message-Id')
email_from = decode_header(message, 'From')
email_to = decode_header(message, 'To')
references = decode_header(message, 'References')
in_reply_to = decode_header(message, 'In-Reply-To').strip()
thread_references = references or in_reply_to
# 0. First check if this is a bounce message or not.
# See http://datatracker.ietf.org/doc/rfc3462/?include_text=1
# As not all MTAs respect this RFC (googlemail is one of them),
# we also need to verify if the message comes from "mailer-daemon"
localpart = (tools.email_split(email_from) or [''])[0].split('@', 1)[0].lower()
if message.get_content_type() == 'multipart/report' or localpart == 'mailer-daemon':
_logger.info("Not routing bounce email from %s to %s with Message-Id %s",
email_from, email_to, message_id)
return []
# 1. message is a reply to an existing message (exact match of message_id)
ref_match = thread_references and tools.reference_re.search(thread_references)
msg_references = mail_header_msgid_re.findall(thread_references)
mail_message_ids = mail_msg_obj.search(cr, uid, [('message_id', 'in', msg_references)], context=context)
if ref_match and mail_message_ids:
original_msg = mail_msg_obj.browse(cr, SUPERUSER_ID, mail_message_ids[0], context=context)
model, thread_id = original_msg.model, original_msg.res_id
alias_ids = mail_alias.search(cr, uid, [('alias_name', '=', (tools.email_split(email_to) or [''])[0].split('@', 1)[0].lower())])
alias = None
if alias_ids:
alias = mail_alias.browse(cr, uid, alias_ids[0], context=context)
route = self.message_route_verify(
cr, uid, message, message_dict,
(model, thread_id, custom_values, uid, alias),
update_author=True, assert_model=False, create_fallback=True, context=dict(context, drop_alias=True))
if route:
_logger.info(
'Routing mail from %s to %s with Message-Id %s: direct reply to msg: model: %s, thread_id: %s, custom_values: %s, uid: %s',
email_from, email_to, message_id, model, thread_id, custom_values, uid)
return [route]
elif route is False:
return []
# 2. message is a reply to an existing thread (6.1 compatibility)
if ref_match:
reply_thread_id = int(ref_match.group(1))
reply_model = ref_match.group(2) or fallback_model
reply_hostname = ref_match.group(3)
local_hostname = socket.gethostname()
# do not match forwarded emails from another OpenERP system (thread_id collision!)
if local_hostname == reply_hostname:
thread_id, model = reply_thread_id, reply_model
if thread_id and model in self.pool:
model_obj = self.pool[model]
compat_mail_msg_ids = mail_msg_obj.search(
cr, uid, [
('message_id', '=', False),
('model', '=', model),
('res_id', '=', thread_id),
], context=context)
if compat_mail_msg_ids and model_obj.exists(cr, uid, thread_id) and hasattr(model_obj, 'message_update'):
route = self.message_route_verify(
cr, uid, message, message_dict,
(model, thread_id, custom_values, uid, None),
update_author=True, assert_model=True, create_fallback=True, context=context)
if route:
# parent is invalid for a compat-reply
message_dict.pop('parent_id', None)
_logger.info(
'Routing mail from %s to %s with Message-Id %s: direct thread reply (compat-mode) to model: %s, thread_id: %s, custom_values: %s, uid: %s',
email_from, email_to, message_id, model, thread_id, custom_values, uid)
return [route]
elif route is False:
return []
# 3. Reply to a private message
if in_reply_to:
mail_message_ids = mail_msg_obj.search(cr, uid, [
('message_id', '=', in_reply_to),
'!', ('message_id', 'ilike', 'reply_to')
], limit=1, context=context)
if mail_message_ids:
mail_message = mail_msg_obj.browse(cr, uid, mail_message_ids[0], context=context)
route = self.message_route_verify(cr, uid, message, message_dict,
(mail_message.model, mail_message.res_id, custom_values, uid, None),
update_author=True, assert_model=True, create_fallback=True, allow_private=True, context=context)
if route:
_logger.info(
'Routing mail from %s to %s with Message-Id %s: direct reply to a private message: %s, custom_values: %s, uid: %s',
email_from, email_to, message_id, mail_message.id, custom_values, uid)
return [route]
elif route is False:
return []
# no route found for a matching reference (or reply), so parent is invalid
message_dict.pop('parent_id', None)
# 4. Look for a matching mail.alias entry
# Delivered-To is a safe bet in most modern MTAs, but we have to fall back on To + Cc values
# for all the odd MTAs out there, as there is no standard header for the envelope's `rcpt_to` value.
rcpt_tos = \
','.join([decode_header(message, 'Delivered-To'),
decode_header(message, 'To'),
decode_header(message, 'Cc'),
decode_header(message, 'Resent-To'),
decode_header(message, 'Resent-Cc')])
local_parts = [e.split('@')[0].lower() for e in tools.email_split(rcpt_tos)]
if local_parts:
alias_ids = mail_alias.search(cr, uid, [('alias_name', 'in', local_parts)])
if alias_ids:
routes = []
for alias in mail_alias.browse(cr, uid, alias_ids, context=context):
user_id = alias.alias_user_id.id
if not user_id:
# TDE note: this could cause crashes, because there is no guarantee that the
# user who sent the email has the right to create or modify a new document
# Fallback on user_id = uid
# Note: recognized partners will be added as followers anyway
# user_id = self._message_find_user_id(cr, uid, message, context=context)
user_id = uid
_logger.info('No matching user_id for the alias %s', alias.alias_name)
route = (alias.alias_model_id.model, alias.alias_force_thread_id, eval(alias.alias_defaults), user_id, alias)
route = self.message_route_verify(cr, uid, message, message_dict, route,
update_author=True, assert_model=True, create_fallback=True, context=context)
if route:
_logger.info(
'Routing mail from %s to %s with Message-Id %s: direct alias match: %r',
email_from, email_to, message_id, route)
routes.append(route)
return routes
# 5. Fallback to the provided parameters, if they work
if not thread_id:
# Legacy: fallback to matching [ID] in the Subject
match = tools.res_re.search(decode_header(message, 'Subject'))
thread_id = match and match.group(1)
# Convert into int (bug spotted in 7.0 because of str)
try:
thread_id = int(thread_id)
except (ValueError, TypeError):
thread_id = False
route = self.message_route_verify(cr, uid, message, message_dict,
(fallback_model, thread_id, custom_values, uid, None),
update_author=True, assert_model=True, context=context)
if route:
_logger.info(
'Routing mail from %s to %s with Message-Id %s: fallback to model:%s, thread_id:%s, custom_values:%s, uid:%s',
email_from, email_to, message_id, fallback_model, thread_id, custom_values, uid)
return [route]
# ValueError if no routes found and if no bounce occurred
raise ValueError(
'No possible route found for incoming message from %s to %s (Message-Id %s:). '
'Create an appropriate mail.alias or force the destination model.' %
(email_from, email_to, message_id)
)
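# Illustrative sketch, not part of the original module: the shape of a route
# 5-tuple as returned by message_route(); every value below is made up.
demo_route = ('crm.lead', 42, {}, 1, None)  # (model, thread_id, custom_values, user_id, alias record or None)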
def message_route_process(self, cr, uid, message, message_dict, routes, context=None):
# postpone setting message_dict.partner_ids after message_post, to avoid double notifications
context = dict(context or {})
partner_ids = message_dict.pop('partner_ids', [])
thread_id = False
for model, thread_id, custom_values, user_id, alias in routes or ():
if self._name == 'mail.thread':
context['thread_model'] = model
if model:
model_pool = self.pool[model]
if not (thread_id and hasattr(model_pool, 'message_update') or hasattr(model_pool, 'message_new')):
raise ValueError(
"Undeliverable mail with Message-Id %s, model %s does not accept incoming emails" %
(message_dict['message_id'], model)
)
# disable subscriptions during message_new/update to avoid having the system user running the
# email gateway become a follower of all inbound messages
nosub_ctx = dict(context, mail_create_nosubscribe=True, mail_create_nolog=True)
if thread_id and hasattr(model_pool, 'message_update'):
model_pool.message_update(cr, user_id, [thread_id], message_dict, context=nosub_ctx)
else:
# if a new thread is created, parent is irrelevant
message_dict.pop('parent_id', None)
thread_id = model_pool.message_new(cr, user_id, message_dict, custom_values, context=nosub_ctx)
else:
if thread_id:
raise ValueError("Posting a message without model should be with a null res_id, to create a private message.")
model_pool = self.pool.get('mail.thread')
if not hasattr(model_pool, 'message_post'):
context['thread_model'] = model
model_pool = self.pool['mail.thread']
new_msg_id = model_pool.message_post(cr, uid, [thread_id], context=context, subtype='mail.mt_comment', **message_dict)
if partner_ids:
# postponed after message_post, because this is an external message and we don't want to create
# duplicate emails due to notifications
self.pool.get('mail.message').write(cr, uid, [new_msg_id], {'partner_ids': partner_ids}, context=context)
return thread_id
def message_process(self, cr, uid, model, message, custom_values=None,
save_original=False, strip_attachments=False,
thread_id=None, context=None):
""" Process an incoming RFC2822 email message, relying on
``mail.message.parse()`` for the parsing operation,
and ``message_route()`` to figure out the target model.
Once the target model is known, its ``message_new`` method
is called with the new message (if the thread record did not exist)
or its ``message_update`` method (if it did).
There is a special case where the target model is False: a reply
to a private message. In this case, we skip the message_new /
message_update step, to just post a new message using mail_thread
message_post.
:param string model: the fallback model to use if the message
does not match any of the currently configured mail aliases
(may be None if a matching alias is supposed to be present)
:param message: source of the RFC2822 message
:type message: string or xmlrpclib.Binary
:param dict custom_values: optional dictionary of field values
to pass to ``message_new`` if a new record needs to be created.
Ignored if the thread record already exists, and also if a
matching mail.alias was found (aliases define their own defaults)
:param bool save_original: whether to keep a copy of the original
email source attached to the message after it is imported.
:param bool strip_attachments: whether to strip all attachments
before processing the message, in order to save some space.
:param int thread_id: optional ID of the record/thread from ``model``
to which this mail should be attached. When provided, this
overrides the automatic detection based on the message
headers.
"""
if context is None:
context = {}
# extract message bytes - we are forced to pass the message as binary because
# we don't know its encoding until we parse its headers and hence can't
# convert it to utf-8 for transport between the mailgate script and here.
if isinstance(message, xmlrpclib.Binary):
message = str(message.data)
# Warning: message_from_string doesn't always work correctly on unicode,
# we must use utf-8 strings here :-(
if isinstance(message, unicode):
message = message.encode('utf-8')
msg_txt = email.message_from_string(message)
# parse the message, verify we are not in a loop by checking message_id is not duplicated
msg = self.message_parse(cr, uid, msg_txt, save_original=save_original, context=context)
if strip_attachments:
msg.pop('attachments', None)
if msg.get('message_id'): # should always be True as message_parse generates one if missing
existing_msg_ids = self.pool.get('mail.message').search(cr, SUPERUSER_ID, [
('message_id', '=', msg.get('message_id')),
], context=context)
if existing_msg_ids:
_logger.info('Ignored mail from %s to %s with Message-Id %s: found duplicated Message-Id during processing',
msg.get('from'), msg.get('to'), msg.get('message_id'))
return False
# find possible routes for the message
routes = self.message_route(cr, uid, msg_txt, msg, model, thread_id, custom_values, context=context)
thread_id = self.message_route_process(cr, uid, msg_txt, msg, routes, context=context)
return thread_id
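# Hedged usage sketch, not part of the original module (assumption: `cr`,
# SUPERUSER_ID and `registry` come from a running server environment; a
# mailgate script would feed the raw RFC2822 source read from stdin):
#
# import sys
# raw_email = sys.stdin.read()
# thread_id = registry['mail.thread'].message_process(cr, SUPERUSER_ID, None, raw_email)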
def message_new(self, cr, uid, msg_dict, custom_values=None, context=None):
"""Called by ``message_process`` when a new message is received
for a given thread model, if the message did not belong to
an existing thread.
The default behavior is to create a new record of the corresponding
model (based on some very basic info extracted from the message).
Additional behavior may be implemented by overriding this method.
:param dict msg_dict: a map containing the email details and
attachments. See ``message_process`` and
``mail.message.parse`` for details.
:param dict custom_values: optional dictionary of additional
field values to pass to create()
when creating the new thread record.
Be careful, these values may override
any other values coming from the message.
:param dict context: if a ``thread_model`` value is present
in the context, its value will be used
to determine the model of the record
to create (instead of the current model).
:rtype: int
:return: the id of the newly created thread object
"""
if context is None:
context = {}
data = {}
if isinstance(custom_values, dict):
data = custom_values.copy()
model = context.get('thread_model') or self._name
model_pool = self.pool[model]
fields = model_pool.fields_get(cr, uid, context=context)
if 'name' in fields and not data.get('name'):
data['name'] = msg_dict.get('subject', '')
res_id = model_pool.create(cr, uid, data, context=context)
return res_id
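# Hedged override sketch, not part of the original module (the model and
# field names below are invented for illustration):
#
# class my_ticket(osv.Model):
#     _name = 'my.ticket'
#     _inherit = ['mail.thread']
#
#     def message_new(self, cr, uid, msg_dict, custom_values=None, context=None):
#         defaults = {'description': msg_dict.get('body', '')}
#         defaults.update(custom_values or {})
#         return super(my_ticket, self).message_new(cr, uid, msg_dict, custom_values=defaults, context=context)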
def message_update(self, cr, uid, ids, msg_dict, update_vals=None, context=None):
"""Called by ``message_process`` when a new message is received
for an existing thread. The default behavior is to update the record
with update_vals taken from the incoming email.
Additional behavior may be implemented by overriding this
method.
:param dict msg_dict: a map containing the email details and
attachments. See ``message_process`` and
``mail.message.parse()`` for details.
:param dict update_vals: a dict containing values to update records
given their ids; if the dict is None or is
void, no write operation is performed.
"""
if update_vals:
self.write(cr, uid, ids, update_vals, context=context)
return True
def _message_extract_payload(self, message, save_original=False):
"""Extract body as HTML and attachments from the mail message"""
attachments = []
body = u''
if save_original:
attachments.append(('original_email.eml', message.as_string()))
# Be careful, content-type may contain tricky content like in the
# following example so test the MIME type with startswith()
#
# Content-Type: multipart/related;
# boundary="_004_3f1e4da175f349248b8d43cdeb9866f1AMSPR06MB343eurprd06pro_";
# type="text/html"
if not message.is_multipart() or message.get('content-type', '').startswith("text/"):
encoding = message.get_content_charset()
body = message.get_payload(decode=True)
body = tools.ustr(body, encoding, errors='replace')
if message.get_content_type() == 'text/plain':
# text/plain -> <pre/>
body = tools.append_content_to_html(u'', body, preserve=True)
else:
alternative = False
mixed = False
html = u''
for part in message.walk():
if part.get_content_type() == 'multipart/alternative':
alternative = True
if part.get_content_type() == 'multipart/mixed':
mixed = True
if part.get_content_maintype() == 'multipart':
continue # skip container
# part.get_filename returns the decoded value if it is able to decode it, the encoded value otherwise.
# The original get_filename is not able to decode iso-8859-1 (for instance);
# therefore, ISO-encoded attachments cannot be decoded properly with get_filename.
# The code here partially copies the original get_filename method, but handles more encodings.
filename = part.get_param('filename', None, 'content-disposition')
if not filename:
filename = part.get_param('name', None)
if filename:
if isinstance(filename, tuple):
# RFC2231
filename = email.utils.collapse_rfc2231_value(filename).strip()
else:
filename = decode(filename)
encoding = part.get_content_charset() # None if attachment
# 1) Explicit Attachments -> attachments
if filename or part.get('content-disposition', '').strip().startswith('attachment'):
attachments.append((filename or 'attachment', part.get_payload(decode=True)))
continue
# 2) text/plain -> <pre/>
if part.get_content_type() == 'text/plain' and (not alternative or not body):
body = tools.append_content_to_html(body, tools.ustr(part.get_payload(decode=True),
encoding, errors='replace'), preserve=True)
# 3) text/html -> raw
elif part.get_content_type() == 'text/html':
# multipart/alternative has one text part and one html part; keep only the html one
# multipart/mixed allows several html parts; append the html content
append_content = not alternative or (html and mixed)
html = tools.ustr(part.get_payload(decode=True), encoding, errors='replace')
if not append_content:
body = html
else:
body = tools.append_content_to_html(body, html, plaintext=False)
# 4) Anything else -> attachment
else:
attachments.append((filename or 'attachment', part.get_payload(decode=True)))
return body, attachments
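# Standalone sketch of the body/attachment split above, using only the
# stdlib email package; not part of the original module.
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
_demo = MIMEMultipart('mixed')
_demo.attach(MIMEText('hello', 'plain', 'utf-8'))
_demo_att = MIMEText('col1,col2', 'plain', 'utf-8')
_demo_att.add_header('Content-Disposition', 'attachment', filename='data.csv')
_demo.attach(_demo_att)
for _part in _demo.walk():
    if _part.get_content_maintype() == 'multipart':
        continue  # skip containers, exactly as _message_extract_payload does
    if _part.get('content-disposition', '').startswith('attachment'):
        print(('attachment', _part.get_filename()))
    else:
        print(('body', _part.get_payload(decode=True)))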
def message_parse(self, cr, uid, message, save_original=False, context=None):
"""Parses a string or email.message.Message representing an
RFC-2822 email, and returns a generic dict holding the
message details.
:param message: the message to parse
:type message: email.message.Message | string | unicode
:param bool save_original: whether the returned dict
should include an ``original`` attachment containing
the source of the message
:rtype: dict
:return: A dict with the following structure, where each
field may not be present if missing in original
message::
{ 'message_id': msg_id,
'subject': subject,
'from': from,
'to': to,
'cc': cc,
'body': unified_body,
'attachments': [('file1', 'bytes'),
('file2', 'bytes')]
}
"""
msg_dict = {
'type': 'email',
}
if not isinstance(message, Message):
if isinstance(message, unicode):
# Warning: message_from_string doesn't always work correctly on unicode,
# we must use utf-8 strings here :-(
message = message.encode('utf-8')
message = email.message_from_string(message)
message_id = message['message-id']
if not message_id:
# Very unusual situation, but we should be fault-tolerant here
message_id = "<%s@localhost>" % time.time()
_logger.debug('Parsing Message without message-id, generating a random one: %s', message_id)
msg_dict['message_id'] = message_id
if message.get('Subject'):
msg_dict['subject'] = decode(message.get('Subject'))
# Envelope fields not stored in mail.message but made available for message_new()
msg_dict['from'] = decode(message.get('from'))
msg_dict['to'] = decode(message.get('to'))
msg_dict['cc'] = decode(message.get('cc'))
msg_dict['email_from'] = decode(message.get('from'))
partner_ids = self._message_find_partners(cr, uid, message, ['To', 'Cc'], context=context)
msg_dict['partner_ids'] = [(4, partner_id) for partner_id in partner_ids]
if message.get('Date'):
try:
date_hdr = decode(message.get('Date'))
parsed_date = dateutil.parser.parse(date_hdr, fuzzy=True)
if parsed_date.utcoffset() is None:
# naive datetime, so we arbitrarily decide to make it
# UTC, there's no better choice. Should not happen,
# as RFC2822 requires timezone offset in Date headers.
stored_date = parsed_date.replace(tzinfo=pytz.utc)
else:
stored_date = parsed_date.astimezone(tz=pytz.utc)
except Exception:
_logger.warning('Failed to parse Date header %r in incoming mail '
'with message-id %r, assuming current date/time.',
message.get('Date'), message_id)
stored_date = datetime.datetime.now()
msg_dict['date'] = stored_date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
if message.get('In-Reply-To'):
parent_ids = self.pool.get('mail.message').search(cr, uid, [('message_id', '=', decode(message['In-Reply-To'].strip()))])
if parent_ids:
msg_dict['parent_id'] = parent_ids[0]
if message.get('References') and 'parent_id' not in msg_dict:
msg_list = mail_header_msgid_re.findall(decode(message['References']))
parent_ids = self.pool.get('mail.message').search(cr, uid, [('message_id', 'in', [x.strip() for x in msg_list])])
if parent_ids:
msg_dict['parent_id'] = parent_ids[0]
msg_dict['body'], msg_dict['attachments'] = self._message_extract_payload(message, save_original=save_original)
return msg_dict
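# Standalone sketch, not part of the original module: the dict shape that
# message_parse() produces, demonstrated on a minimal RFC2822 source (the
# real method also resolves partner_ids, parent_id and date).
_demo_raw = "Message-Id: <demo@example.com>\nSubject: Hi\nFrom: a@example.com\nTo: b@example.com\n\nbody"
_demo_msg = email.message_from_string(_demo_raw)
print({'message_id': _demo_msg['message-id'], 'subject': _demo_msg['subject'],
       'from': _demo_msg['from'], 'to': _demo_msg['to'], 'body': _demo_msg.get_payload()})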
#------------------------------------------------------
# Note specific
#------------------------------------------------------
def _message_add_suggested_recipient(self, cr, uid, result, obj, partner=None, email=None, reason='', context=None):
""" Called by message_get_suggested_recipients, to add a suggested
recipient in the result dictionary. The form is :
partner_id, partner_name<partner_email> or partner_name, reason """
if email and not partner:
# get partner info from email
partner_info = self.message_partner_info_from_emails(cr, uid, obj.id, [email], context=context)[0]
if partner_info.get('partner_id'):
partner = self.pool.get('res.partner').browse(cr, SUPERUSER_ID, [partner_info['partner_id']], context=context)[0]
if email and email in [val[1] for val in result[obj.id]]: # already existing email -> skip
return result
if partner and partner in obj.message_follower_ids: # recipient already in the followers -> skip
return result
if partner and partner.id in [val[0] for val in result[obj.id]]: # already existing partner ID -> skip
return result
if partner and partner.email: # complete profile: id, name <email>
result[obj.id].append((partner.id, '%s<%s>' % (partner.name, partner.email), reason))
elif partner: # incomplete profile: id, name
result[obj.id].append((partner.id, '%s' % (partner.name), reason))
else: # unknown partner, we are probably managing an email address
result[obj.id].append((False, email, reason))
return result
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
""" Returns suggested recipients for ids. Those are a list of
tuple (partner_id, partner_name, reason), to be managed by Chatter. """
result = dict((res_id, []) for res_id in ids)
if 'user_id' in self._fields:
for obj in self.browse(cr, SUPERUSER_ID, ids, context=context): # SUPERUSER because of a read on res.users that would crash otherwise
if not obj.user_id or not obj.user_id.partner_id:
continue
self._message_add_suggested_recipient(cr, uid, result, obj, partner=obj.user_id.partner_id, reason=self._fields['user_id'].string, context=context)
return result
def _find_partner_from_emails(self, cr, uid, id, emails, model=None, context=None, check_followers=True):
""" Utility method to find partners from email addresses. The rules are :
1 - check in document (model | self, id) followers
2 - try to find a matching partner that is also a user
3 - try to find a matching partner
:param list emails: list of email addresses
:param string model: model to fetch related record; by default self
is used.
:param boolean check_followers: check in document followers
"""
partner_obj = self.pool['res.partner']
users_obj = self.pool['res.users']
partner_ids = []
obj = None
if id and (model or self._name != 'mail.thread') and check_followers:
if model:
obj = self.pool[model].browse(cr, uid, id, context=context)
else:
obj = self.browse(cr, uid, id, context=context)
for contact in emails:
partner_id = False
email_address = tools.email_split(contact)
if not email_address:
partner_ids.append(partner_id)
continue
email_address = email_address[0]
# first try: check in document's followers
if obj:
for follower in obj.message_follower_ids:
if follower.email == email_address:
partner_id = follower.id
# second try: check in partners that are also users
# Escape special SQL characters in email_address to avoid invalid matches
email_address = (email_address.replace('\\', '\\\\').replace('%', '\\%').replace('_', '\\_'))
email_brackets = "<%s>" % email_address
if not partner_id:
# exact, case-insensitive match
user_ids = users_obj.search(cr, SUPERUSER_ID,
[('email', '=ilike', email_address)],
limit=1, context=context)
if not user_ids:
# if no match with addr-spec, attempt substring match within name-addr pair
user_ids = users_obj.search(cr, SUPERUSER_ID,
[('email', 'ilike', email_brackets)], limit=1, context=context)
if user_ids:
partner_id = users_obj.browse(cr, SUPERUSER_ID, user_ids[0], context=context).partner_id.id
# third try: check in partners
if not partner_id:
# exact, case-insensitive match
ids = partner_obj.search(cr, SUPERUSER_ID,
[('email', '=ilike', email_address)],
limit=1, context=context)
if not ids:
# if no match with addr-spec, attempt substring match within name-addr pair
ids = partner_obj.search(cr, SUPERUSER_ID,
[('email', 'ilike', email_brackets)],
limit=1, context=context)
if ids:
partner_id = ids[0]
partner_ids.append(partner_id)
return partner_ids
def message_partner_info_from_emails(self, cr, uid, id, emails, link_mail=False, context=None):
""" Convert a list of emails into a list partner_ids and a list
new_partner_ids. The return value is non conventional because
it is meant to be used by the mail widget.
:return dict: partner_ids and new_partner_ids """
mail_message_obj = self.pool.get('mail.message')
partner_ids = self._find_partner_from_emails(cr, uid, id, emails, context=context)
result = list()
for idx in range(len(emails)):
email_address = emails[idx]
partner_id = partner_ids[idx]
partner_info = {'full_name': email_address, 'partner_id': partner_id}
result.append(partner_info)
# link mails previously sent from this email address to the newly found partner
if link_mail and partner_info['partner_id']:
# Escape special SQL characters in email_address to avoid invalid matches
email_address = (email_address.replace('\\', '\\\\').replace('%', '\\%').replace('_', '\\_'))
email_brackets = "<%s>" % email_address
message_ids = mail_message_obj.search(cr, SUPERUSER_ID, [
'|',
('email_from', '=ilike', email_address),
('email_from', 'ilike', email_brackets),
('author_id', '=', False)
], context=context)
if message_ids:
mail_message_obj.write(cr, SUPERUSER_ID, message_ids, {'author_id': partner_info['partner_id']}, context=context)
return result
def _message_preprocess_attachments(self, cr, uid, attachments, attachment_ids, attach_model, attach_res_id, context=None):
""" Preprocess attachments for mail_thread.message_post() or mail_mail.create().
:param list attachments: list of attachment tuples in the form ``(name,content)``,
where content is NOT base64 encoded
:param list attachment_ids: a list of attachment ids, not in to-many command form
:param str attach_model: the model of the attachments parent record
:param integer attach_res_id: the id of the attachments parent record
"""
Attachment = self.pool['ir.attachment']
m2m_attachment_ids = []
if attachment_ids:
filtered_attachment_ids = Attachment.search(cr, SUPERUSER_ID, [
('res_model', '=', 'mail.compose.message'),
('create_uid', '=', uid),
('id', 'in', attachment_ids)], context=context)
if filtered_attachment_ids:
Attachment.write(cr, SUPERUSER_ID, filtered_attachment_ids, {'res_model': attach_model, 'res_id': attach_res_id}, context=context)
m2m_attachment_ids += [(4, id) for id in attachment_ids]
# Handle the attachments parameter, a list of (name, content) tuples
for name, content in attachments:
if isinstance(content, unicode):
content = content.encode('utf-8')
data_attach = {
'name': name,
'datas': base64.b64encode(str(content)),
'datas_fname': name,
'description': name,
'res_model': attach_model,
'res_id': attach_res_id,
}
m2m_attachment_ids.append((0, 0, data_attach))
return m2m_attachment_ids
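# Pure-data sketch, not part of the original module: the one2many/many2many
# "commands" built above follow the standard OpenERP convention, where
# (4, id) links an existing attachment and (0, 0, vals) creates a new one.
# All ids and values below are made up.
_demo_commands = [(4, 7),
                  (0, 0, {'name': 'report.pdf', 'datas': 'aGVsbG8=',
                          'res_model': 'res.partner', 'res_id': 1})]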
@api.cr_uid_ids_context
def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification',
subtype=None, parent_id=False, attachments=None, context=None,
content_subtype='html', **kwargs):
""" Post a new message in an existing thread, returning the new
mail.message ID.
:param int thread_id: thread ID to post into, or list with one ID;
if False/0, mail.message model will also be set as False
:param str body: body of the message, usually raw HTML that will
be sanitized
:param str type: see mail_message.type field
:param str content_subtype: if plaintext, convert the body into HTML
:param int parent_id: handle reply to a previous message by adding the
parent partners to the message in case of private discussion
:param list attachments: list of attachment tuples in the form
``(name,content)``, where content is NOT base64 encoded
Extra keyword arguments will be used as default column values for the
new mail.message record. Special cases:
- attachment_ids: supposed not attached to any document; attach them
to the related document. Should only be set by Chatter.
:return int: ID of newly created mail.message
"""
if context is None:
context = {}
if attachments is None:
attachments = {}
mail_message = self.pool.get('mail.message')
ir_attachment = self.pool.get('ir.attachment')
assert (not thread_id) or \
isinstance(thread_id, (int, long)) or \
(isinstance(thread_id, (list, tuple)) and len(thread_id) == 1), \
"Invalid thread_id; should be 0, False, an ID or a list with one ID"
if isinstance(thread_id, (list, tuple)):
thread_id = thread_id[0]
# if we're processing a message directly coming from the gateway, the destination model was
# set in the context.
model = False
if thread_id:
model = context.get('thread_model', False) if self._name == 'mail.thread' else self._name
if model and model != self._name and hasattr(self.pool[model], 'message_post'):
del context['thread_model']
return self.pool[model].message_post(cr, uid, thread_id, body=body, subject=subject, type=type, subtype=subtype, parent_id=parent_id, attachments=attachments, context=context, content_subtype=content_subtype, **kwargs)
#0: Find the message's author, because we need it for private discussion
author_id = kwargs.get('author_id')
if author_id is None: # keep False values
author_id = self.pool.get('mail.message')._get_default_author(cr, uid, context=context)
# 1: Handle content subtype: if plaintext, convert it into HTML
if content_subtype == 'plaintext':
body = tools.plaintext2html(body)
# 2: Private message: add recipients (recipients and author of the parent message), excluding the current author
# + legacy-code management (we only handle 4 and 6 commands)
partner_ids = set()
kwargs_partner_ids = kwargs.pop('partner_ids', [])
for partner_id in kwargs_partner_ids:
if isinstance(partner_id, (list, tuple)) and partner_id[0] == 4 and len(partner_id) == 2:
partner_ids.add(partner_id[1])
if isinstance(partner_id, (list, tuple)) and partner_id[0] == 6 and len(partner_id) == 3:
partner_ids |= set(partner_id[2])
elif isinstance(partner_id, (int, long)):
partner_ids.add(partner_id)
else:
pass # we do not manage anything else
if parent_id and not model:
parent_message = mail_message.browse(cr, uid, parent_id, context=context)
private_followers = set([partner.id for partner in parent_message.partner_ids])
if parent_message.author_id:
private_followers.add(parent_message.author_id.id)
private_followers -= set([author_id])
partner_ids |= private_followers
# 3. Attachments
# - HACK TDE FIXME: Chatter: attachments linked to the document (not done JS-side), load the message
attachment_ids = self._message_preprocess_attachments(cr, uid, attachments, kwargs.pop('attachment_ids', []), model, thread_id, context)
# 4: mail.message.subtype
subtype_id = False
if subtype:
if '.' not in subtype:
subtype = 'mail.%s' % subtype
subtype_id = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, subtype)
# automatically subscribe recipients if asked to
if context.get('mail_post_autofollow') and thread_id and partner_ids:
partner_to_subscribe = partner_ids
if context.get('mail_post_autofollow_partner_ids'):
partner_to_subscribe = filter(lambda item: item in context.get('mail_post_autofollow_partner_ids'), partner_ids)
self.message_subscribe(cr, uid, [thread_id], list(partner_to_subscribe), context=context)
# _mail_flat_thread: automatically set free messages to the first posted message
if self._mail_flat_thread and model and not parent_id and thread_id:
message_ids = mail_message.search(cr, uid, ['&', ('res_id', '=', thread_id), ('model', '=', model), ('type', '=', 'email')], context=context, order="id ASC", limit=1)
if not message_ids:
message_ids = mail_message.search(cr, uid, ['&', ('res_id', '=', thread_id), ('model', '=', model)], context=context, order="id ASC", limit=1)
parent_id = message_ids and message_ids[0] or False
# we want to set a parent: force to set the parent_id to the oldest ancestor, to avoid having more than 1 level of thread
elif parent_id:
message_ids = mail_message.search(cr, SUPERUSER_ID, [('id', '=', parent_id), ('parent_id', '!=', False)], context=context)
# avoid loops when finding ancestors
processed_list = []
if message_ids:
message = mail_message.browse(cr, SUPERUSER_ID, message_ids[0], context=context)
while (message.parent_id and message.parent_id.id not in processed_list):
processed_list.append(message.parent_id.id)
message = message.parent_id
parent_id = message.id
values = kwargs
values.update({
'author_id': author_id,
'model': model,
'res_id': model and thread_id or False,
'body': body,
'subject': subject or False,
'type': type,
'parent_id': parent_id,
'attachment_ids': attachment_ids,
'subtype_id': subtype_id,
'partner_ids': [(4, pid) for pid in partner_ids],
})
# Avoid warnings about non-existing fields
for x in ('from', 'to', 'cc'):
values.pop(x, None)
# Post the message
msg_id = mail_message.create(cr, uid, values, context=context)
# Post-process: subscribe author, update message_last_post
if model and model != 'mail.thread' and thread_id and subtype_id:
# done with SUPERUSER_ID, because on some models users can post only with read access, not necessarily write access
self.write(cr, SUPERUSER_ID, [thread_id], {'message_last_post': fields.datetime.now()}, context=context)
message = mail_message.browse(cr, uid, msg_id, context=context)
if message.author_id and model and thread_id and type != 'notification' and not context.get('mail_create_nosubscribe'):
self.message_subscribe(cr, uid, [thread_id], [message.author_id.id], context=context)
return msg_id
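# Hedged usage sketch, not part of the original module (assumption: cr/uid
# come from a server environment and the record id 42 is made up); posts a
# comment and links partner 7 with a (4, id) command:
#
# self.pool['res.partner'].message_post(
#     cr, uid, [42], body='<p>Thanks!</p>', subtype='mt_comment',
#     partner_ids=[(4, 7)])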
#------------------------------------------------------
# Followers API
#------------------------------------------------------
def message_get_subscription_data(self, cr, uid, ids, user_pid=None, context=None):
""" Wrapper to get subtypes data. """
return self._get_subscription_data(cr, uid, ids, None, None, user_pid=user_pid, context=context)
def message_subscribe_users(self, cr, uid, ids, user_ids=None, subtype_ids=None, context=None):
""" Wrapper on message_subscribe, using users. If user_ids is not
provided, subscribe uid instead. """
if user_ids is None:
user_ids = [uid]
partner_ids = [user.partner_id.id for user in self.pool.get('res.users').browse(cr, uid, user_ids, context=context)]
result = self.message_subscribe(cr, uid, ids, partner_ids, subtype_ids=subtype_ids, context=context)
if partner_ids and result:
self.pool['ir.ui.menu'].clear_cache()
return result
def message_subscribe(self, cr, uid, ids, partner_ids, subtype_ids=None, context=None):
""" Add partners to the records followers. """
if context is None:
context = {}
# not necessary for computation, but saves an access right check
if not partner_ids:
return True
mail_followers_obj = self.pool.get('mail.followers')
subtype_obj = self.pool.get('mail.message.subtype')
user_pid = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id.id
if set(partner_ids) == set([user_pid]):
try:
self.check_access_rights(cr, uid, 'read')
self.check_access_rule(cr, uid, ids, 'read')
except (osv.except_osv, orm.except_orm):
return False
else:
self.check_access_rights(cr, uid, 'write')
self.check_access_rule(cr, uid, ids, 'write')
existing_pids_dict = {}
fol_ids = mail_followers_obj.search(cr, SUPERUSER_ID, ['&', '&', ('res_model', '=', self._name), ('res_id', 'in', ids), ('partner_id', 'in', partner_ids)])
for fol in mail_followers_obj.browse(cr, SUPERUSER_ID, fol_ids, context=context):
existing_pids_dict.setdefault(fol.res_id, set()).add(fol.partner_id.id)
# subtype_ids specified: update already subscribed partners
if subtype_ids and fol_ids:
mail_followers_obj.write(cr, SUPERUSER_ID, fol_ids, {'subtype_ids': [(6, 0, subtype_ids)]}, context=context)
# subtype_ids not specified: do not update already subscribed partners, fetch default subtypes for new partners
if subtype_ids is None:
subtype_ids = subtype_obj.search(
cr, uid, [
('default', '=', True), '|', ('res_model', '=', self._name), ('res_model', '=', False)], context=context)
for id in ids:
existing_pids = existing_pids_dict.get(id, set())
new_pids = set(partner_ids) - existing_pids
# subscribe new followers
for new_pid in new_pids:
mail_followers_obj.create(
cr, SUPERUSER_ID, {
'res_model': self._name,
'res_id': id,
'partner_id': new_pid,
'subtype_ids': [(6, 0, subtype_ids)],
}, context=context)
return True
def message_unsubscribe_users(self, cr, uid, ids, user_ids=None, context=None):
""" Wrapper on message_subscribe, using users. If user_ids is not
provided, unsubscribe uid instead. """
if user_ids is None:
user_ids = [uid]
partner_ids = [user.partner_id.id for user in self.pool.get('res.users').browse(cr, uid, user_ids, context=context)]
result = self.message_unsubscribe(cr, uid, ids, partner_ids, context=context)
if partner_ids and result:
self.pool['ir.ui.menu'].clear_cache()
return result
def message_unsubscribe(self, cr, uid, ids, partner_ids, context=None):
""" Remove partners from the records followers. """
# not necessary for computation, but saves an access right check
if not partner_ids:
return True
user_pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
if set(partner_ids) == set([user_pid]):
self.check_access_rights(cr, uid, 'read')
self.check_access_rule(cr, uid, ids, 'read')
else:
self.check_access_rights(cr, uid, 'write')
self.check_access_rule(cr, uid, ids, 'write')
fol_obj = self.pool['mail.followers']
fol_ids = fol_obj.search(
cr, SUPERUSER_ID, [
('res_model', '=', self._name),
('res_id', 'in', ids),
('partner_id', 'in', partner_ids)
], context=context)
return fol_obj.unlink(cr, SUPERUSER_ID, fol_ids, context=context)
def _message_get_auto_subscribe_fields(self, cr, uid, updated_fields, auto_follow_fields=None, context=None):
""" Returns the list of relational fields linking to res.users that should
trigger an auto subscribe. The default list checks for the fields
- called 'user_id'
- linking to res.users
- with track_visibility set
In OpenERP V7, this is sufficient for all major addons such as opportunity,
project, issue, recruitment, sale.
Override this method if a custom behavior is needed about fields
that automatically subscribe users.
"""
if auto_follow_fields is None:
auto_follow_fields = ['user_id']
user_field_lst = []
for name, field in self._fields.items():
if name in auto_follow_fields and name in updated_fields and getattr(field, 'track_visibility', False) and field.comodel_name == 'res.users':
user_field_lst.append(name)
return user_field_lst
def _message_auto_subscribe_notify(self, cr, uid, ids, partner_ids, context=None):
""" Send notifications to the partners automatically subscribed to the thread
Override this method if a custom behavior is needed about partners
that should be notified or messages that should be sent
"""
# find the first email message and set it as unread so that the auto-subscribed partners get a notification
if partner_ids:
for record_id in ids:
message_obj = self.pool.get('mail.message')
msg_ids = message_obj.search(cr, SUPERUSER_ID, [
('model', '=', self._name),
('res_id', '=', record_id),
('type', '=', 'email')], limit=1, context=context)
if not msg_ids:
msg_ids = message_obj.search(cr, SUPERUSER_ID, [
('model', '=', self._name),
('res_id', '=', record_id)], limit=1, context=context)
if msg_ids:
notification_obj = self.pool.get('mail.notification')
notification_obj._notify(cr, uid, msg_ids[0], partners_to_notify=partner_ids, context=context)
message = message_obj.browse(cr, SUPERUSER_ID, msg_ids[0], context=context)
if message.parent_id:
partner_ids_to_parent_notify = set(partner_ids).difference(partner.id for partner in message.parent_id.notified_partner_ids)
for partner_id in partner_ids_to_parent_notify:
notification_obj.create(cr, uid, {
'message_id': message.parent_id.id,
'partner_id': partner_id,
'is_read': True,
}, context=context)
def message_auto_subscribe(self, cr, uid, ids, updated_fields, context=None, values=None):
""" Handle auto subscription. Two methods for auto subscription exist:
- tracked res.users relational fields, such as user_id fields. Those fields
must be relation fields toward a res.users record, and must have the
track_visibility attribute set.
- using subtypes parent relationship: check if the current model being
modified has a header record (such as a project for tasks) whose followers
can be added as followers of the current records. Example of structure
with project and task:
- st_project_1.parent_id = st_task_1
- st_project_1.res_model = 'project.project'
- st_project_1.relation_field = 'project_id'
- st_task_1.model = 'project.task'
:param list updated_fields: list of updated fields to track
:param dict values: updated values; if None, the first record will be browsed
to get the values. Added after releasing 7.0, therefore
not merged with the updated_fields argument.
"""
subtype_obj = self.pool.get('mail.message.subtype')
follower_obj = self.pool.get('mail.followers')
new_followers = dict()
# fetch auto_follow_fields: res.users relation fields whose changes are tracked for subscription
user_field_lst = self._message_get_auto_subscribe_fields(cr, uid, updated_fields, context=context)
# fetch header subtypes
header_subtype_ids = subtype_obj.search(cr, uid, ['|', ('res_model', '=', False), ('parent_id.res_model', '=', self._name)], context=context)
subtypes = subtype_obj.browse(cr, uid, header_subtype_ids, context=context)
# if no change in tracked field or no change in tracked relational field: quit
relation_fields = set([subtype.relation_field for subtype in subtypes if subtype.relation_field is not False])
if not any(relation in updated_fields for relation in relation_fields) and not user_field_lst:
return True
# legacy behavior: if values is not given, compute the values by browsing
# @TDENOTE: remove me in 8.0
if values is None:
values = {}
record = self.browse(cr, uid, ids[0], context=context)
for updated_field in updated_fields:
field_value = getattr(record, updated_field)
if isinstance(field_value, BaseModel):
field_value = field_value.id
values[updated_field] = field_value
# find followers of headers, update structure for new followers
headers = set()
for subtype in subtypes:
if subtype.relation_field and values.get(subtype.relation_field):
headers.add((subtype.res_model, values.get(subtype.relation_field)))
if headers:
header_domain = ['|'] * (len(headers) - 1)
for header in headers:
header_domain += ['&', ('res_model', '=', header[0]), ('res_id', '=', header[1])]
header_follower_ids = follower_obj.search(
cr, SUPERUSER_ID,
header_domain,
context=context
)
for header_follower in follower_obj.browse(cr, SUPERUSER_ID, header_follower_ids, context=context):
for subtype in header_follower.subtype_ids:
if subtype.parent_id and subtype.parent_id.res_model == self._name:
new_followers.setdefault(header_follower.partner_id.id, set()).add(subtype.parent_id.id)
elif subtype.res_model is False:
new_followers.setdefault(header_follower.partner_id.id, set()).add(subtype.id)
# add followers coming from res.users relational fields that are tracked
user_ids = [values[name] for name in user_field_lst if values.get(name)]
user_pids = [user.partner_id.id for user in self.pool.get('res.users').browse(cr, SUPERUSER_ID, user_ids, context=context)]
for partner_id in user_pids:
new_followers.setdefault(partner_id, None)
for pid, subtypes in new_followers.items():
subtypes = list(subtypes) if subtypes is not None else None
self.message_subscribe(cr, uid, ids, [pid], subtypes, context=context)
self._message_auto_subscribe_notify(cr, uid, ids, user_pids, context=context)
return True
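# Standalone sketch, not part of the original module: the prefix-notation
# OR chaining used to build header_domain above needs N-1 leading '|'
# operators for N '&' terms. The models/ids below are made up.
_demo_headers = [('project.project', 1), ('crm.case.section', 7)]
_demo_domain = ['|'] * (len(_demo_headers) - 1)
_demo_domain += ['&', ('res_model', '=', 'project.project'), ('res_id', '=', 1)]
_demo_domain += ['&', ('res_model', '=', 'crm.case.section'), ('res_id', '=', 7)]
print(_demo_domain)  # ['|', '&', (...), (...), '&', (...), (...)]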
#------------------------------------------------------
# Thread state
#------------------------------------------------------
def message_mark_as_unread(self, cr, uid, ids, context=None):
""" Set as unread. """
partner_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id.id
cr.execute('''
UPDATE mail_notification SET
is_read=false
WHERE
message_id IN (SELECT id from mail_message where res_id=any(%s) and model=%s limit 1) and
partner_id = %s
''', (ids, self._name, partner_id))
self.pool.get('mail.notification').invalidate_cache(cr, uid, ['is_read'], context=context)
return True
def message_mark_as_read(self, cr, uid, ids, context=None):
""" Set as read. """
partner_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id.id
cr.execute('''
UPDATE mail_notification SET
is_read=true
WHERE
message_id IN (SELECT id FROM mail_message WHERE res_id=ANY(%s) AND model=%s) AND
partner_id = %s
''', (ids, self._name, partner_id))
self.pool.get('mail.notification').invalidate_cache(cr, uid, ['is_read'], context=context)
return True
#------------------------------------------------------
# Thread suggestion
#------------------------------------------------------
def get_suggested_thread(self, cr, uid, removed_suggested_threads=None, context=None):
"""Return a list of suggested threads, sorted by the numbers of followers"""
if context is None:
context = {}
# TDE HACK: originally by MAT from portal/mail_mail.py but not working until the inheritance graph bug is solved in trunk
# TDE FIXME: relocate in portal when it won't be necessary to reload the hr.employee model in an additional bridge module
if 'is_portal' in self.pool['res.groups']._fields:
user = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context)
if any(group.is_portal for group in user.groups_id):
return []
threads = []
if removed_suggested_threads is None:
removed_suggested_threads = []
thread_ids = self.search(cr, uid, [('id', 'not in', removed_suggested_threads), ('message_is_follower', '=', False)], context=context)
for thread in self.browse(cr, uid, thread_ids, context=context):
data = {
'id': thread.id,
'popularity': len(thread.message_follower_ids),
'name': thread.name,
'image_small': thread.image_small
}
threads.append(data)
return sorted(threads, key=lambda x: (x['popularity'], x['id']), reverse=True)[:3]
def message_change_thread(self, cr, uid, id, new_res_id, new_model, context=None):
"""
Transfert the list of the mail thread messages from an model to another
:param id : the old res_id of the mail.message
:param new_res_id : the new res_id of the mail.message
:param new_model : the name of the new model of the mail.message
Example : self.pool.get("crm.lead").message_change_thread(self, cr, uid, 2, 4, "project.issue", context)
will transfert thread of the lead (id=2) to the issue (id=4)
"""
# get the subtype id of the comment message
subtype_res_id = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, 'mail.mt_comment', raise_if_not_found=True)
# get the ids of the comment and non-comment messages of the thread
message_obj = self.pool.get('mail.message')
msg_ids_comment = message_obj.search(cr, uid, [
('model', '=', self._name),
('res_id', '=', id),
('subtype_id', '=', subtype_res_id)], context=context)
msg_ids_not_comment = message_obj.search(cr, uid, [
('model', '=', self._name),
('res_id', '=', id),
('subtype_id', '!=', subtype_res_id)], context=context)
# update the messages
message_obj.write(cr, uid, msg_ids_comment, {"res_id" : new_res_id, "model" : new_model}, context=context)
message_obj.write(cr, uid, msg_ids_not_comment, {"res_id" : new_res_id, "model" : new_model, "subtype_id" : None}, context=context)
return True
|
kxliugang/edx-platform
|
refs/heads/master
|
lms/djangoapps/certificates/migrations/0004_auto__add_field_generatedcertificate_graded_certificate_id__add_field_.py
|
188
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'GeneratedCertificate.graded_certificate_id'
db.add_column('certificates_generatedcertificate', 'graded_certificate_id', self.gf('django.db.models.fields.CharField')(max_length=32, null=True), keep_default=False)
# Adding field 'GeneratedCertificate.graded_download_url'
db.add_column('certificates_generatedcertificate', 'graded_download_url', self.gf('django.db.models.fields.CharField')(max_length=128, null=True), keep_default=False)
# Adding field 'GeneratedCertificate.grade'
db.add_column('certificates_generatedcertificate', 'grade', self.gf('django.db.models.fields.CharField')(max_length=5, null=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'GeneratedCertificate.graded_certificate_id'
db.delete_column('certificates_generatedcertificate', 'graded_certificate_id')
# Deleting field 'GeneratedCertificate.graded_download_url'
db.delete_column('certificates_generatedcertificate', 'graded_download_url')
# Deleting field 'GeneratedCertificate.grade'
db.delete_column('certificates_generatedcertificate', 'grade')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'certificates.generatedcertificate': {
'Meta': {'object_name': 'GeneratedCertificate'},
'certificate_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'download_url': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grade': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True'}),
'graded_certificate_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'graded_download_url': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['certificates']
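# Hedged note, not part of the generated migration: self.gf('...') is
# South's shorthand resolving a dotted path to the field class, so the
# add_column call for 'grade' above is roughly equivalent to:
#
# from django.db import models
# db.add_column('certificates_generatedcertificate', 'grade',
#               models.CharField(max_length=5, null=True), keep_default=False)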
|
axtra/ansible
|
refs/heads/devel
|
v2/ansible/playbook/conditional.py
|
23
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import *
from ansible.playbook.attribute import FieldAttribute
from ansible.template import Templar
class Conditional:
'''
This is a mix-in class, to be used with Base to allow the object
to be run conditionally when a condition is met or skipped.
'''
_when = FieldAttribute(isa='list', default=[])
def __init__(self, loader=None):
# when used directly, this class needs a loader, but we want to
# make sure we don't trample on the existing one if this class
# is used as a mix-in with a playbook base class
if not hasattr(self, '_loader'):
if loader is None:
raise AnsibleError("a loader must be specified when using Conditional() directly")
else:
self._loader = loader
super(Conditional, self).__init__()
def _validate_when(self, attr, name, value):
if not isinstance(value, list):
setattr(self, name, [ value ])
def evaluate_conditional(self, all_vars):
'''
Loops through the conditionals set on this object, returning
False if any of them evaluate as such.
'''
templar = Templar(loader=self._loader, variables=all_vars, fail_on_undefined=False)
for conditional in self.when:
if not self._check_conditional(conditional, templar, all_vars):
return False
return True
def _check_conditional(self, conditional, templar, all_vars):
'''
This method does the low-level evaluation of each conditional
set on this object, using jinja2 to wrap the conditionals for
evaluation.
'''
original = conditional
if conditional is None or conditional == '':
return True
if conditional in all_vars and '-' not in unicode(all_vars[conditional]):
conditional = all_vars[conditional]
conditional = templar.template(conditional)
if not isinstance(conditional, basestring) or conditional == "":
return conditional
# a Jinja2 evaluation that results in something Python can eval!
presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
conditional = templar.template(presented)
val = conditional.strip()
if val == presented:
# the templating failed, meaning most likely a
# variable was undefined. If we happened to be
# looking for an undefined variable, return True,
# otherwise fail
if "is undefined" in original:
return True
elif "is defined" in original:
return False
else:
raise AnsibleError("error while evaluating conditional: %s (%s)" % (original, presented))
elif val == "True":
return True
elif val == "False":
return False
else:
raise AnsibleError("unable to evaluate conditional: %s" % original)
|
Oteng/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/nhl.py
|
25
|
from __future__ import unicode_literals
import re
import json
import os
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
compat_urllib_parse,
compat_urllib_parse_urlparse
)
from ..utils import (
unified_strdate,
)
class NHLBaseInfoExtractor(InfoExtractor):
@staticmethod
def _fix_json(json_string):
return json_string.replace('\\\'', '\'')
def _real_extract_video(self, video_id):
vid_parts = video_id.split(',')
if len(vid_parts) == 3:
video_id = '%s0%s%s-X-h' % (vid_parts[0][:4], vid_parts[1], vid_parts[2].rjust(4, '0'))
json_url = 'http://video.nhl.com/videocenter/servlets/playlist?ids=%s&format=json' % video_id
data = self._download_json(
json_url, video_id, transform_source=self._fix_json)
return self._extract_video(data[0])
def _extract_video(self, info):
video_id = info['id']
self.report_extraction(video_id)
initial_video_url = info['publishPoint']
if info['formats'] == '1':
parsed_url = compat_urllib_parse_urlparse(initial_video_url)
filename, ext = os.path.splitext(parsed_url.path)
path = '%s_sd%s' % (filename, ext)
data = compat_urllib_parse.urlencode({
'type': 'fvod',
'path': compat_urlparse.urlunparse(parsed_url[:2] + (path,) + parsed_url[3:])
})
path_url = 'http://video.nhl.com/videocenter/servlets/encryptvideopath?' + data
path_doc = self._download_xml(
path_url, video_id, 'Downloading final video url')
video_url = path_doc.find('path').text
else:
video_url = initial_video_url
join = compat_urlparse.urljoin
ret = {
'id': video_id,
'title': info['name'],
'url': video_url,
'description': info['description'],
'duration': int(info['duration']),
'thumbnail': join(join(video_url, '/u/'), info['bigImage']),
'upload_date': unified_strdate(info['releaseDate'].split('.')[0]),
}
if video_url.startswith('rtmp:'):
mobj = re.match(r'(?P<tc_url>rtmp://[^/]+/(?P<app>[a-z0-9/]+))/(?P<play_path>mp4:.*)', video_url)
ret.update({
'tc_url': mobj.group('tc_url'),
'play_path': mobj.group('play_path'),
'app': mobj.group('app'),
'no_resume': True,
})
return ret
class NHLIE(NHLBaseInfoExtractor):
IE_NAME = 'nhl.com'
_VALID_URL = r'https?://video(?P<team>\.[^.]*)?\.nhl\.com/videocenter/(?:console|embed)?(?:\?(?:.*?[?&])?)(?:id|hlg|playlist)=(?P<id>[-0-9a-zA-Z,]+)'
_TESTS = [{
'url': 'http://video.canucks.nhl.com/videocenter/console?catid=6?id=453614',
'md5': 'db704a4ea09e8d3988c85e36cc892d09',
'info_dict': {
'id': '453614',
'ext': 'mp4',
'title': 'Quick clip: Weise 4-3 goal vs Flames',
'description': 'Dale Weise scores his first of the season to put the Canucks up 4-3.',
'duration': 18,
'upload_date': '20131006',
},
}, {
'url': 'http://video.nhl.com/videocenter/console?id=2014020024-628-h',
'md5': 'd22e82bc592f52d37d24b03531ee9696',
'info_dict': {
'id': '2014020024-628-h',
'ext': 'mp4',
'title': 'Alex Galchenyuk Goal on Ray Emery (14:40/3rd)',
'description': 'Home broadcast - Montreal Canadiens at Philadelphia Flyers - October 11, 2014',
'duration': 0,
'upload_date': '20141011',
},
}, {
'url': 'http://video.mapleleafs.nhl.com/videocenter/console?id=58665&catid=802',
'md5': 'c78fc64ea01777e426cfc202b746c825',
'info_dict': {
'id': '58665',
'ext': 'flv',
'title': 'Classic Game In Six - April 22, 1979',
'description': 'It was the last playoff game for the Leafs in the decade, and the last time the Leafs and Habs played in the playoffs. Great game, not a great ending.',
'duration': 400,
'upload_date': '20100129'
},
}, {
'url': 'http://video.flames.nhl.com/videocenter/console?id=630616',
'only_matching': True,
}, {
'url': 'http://video.nhl.com/videocenter/?id=736722',
'only_matching': True,
}, {
'url': 'http://video.nhl.com/videocenter/console?hlg=20142015,2,299&lang=en',
'md5': '076fcb88c255154aacbf0a7accc3f340',
'info_dict': {
'id': '2014020299-X-h',
'ext': 'mp4',
'title': 'Penguins at Islanders / Game Highlights',
'description': 'Home broadcast - Pittsburgh Penguins at New York Islanders - November 22, 2014',
'duration': 268,
'upload_date': '20141122',
}
}, {
'url': 'http://video.oilers.nhl.com/videocenter/console?id=691469&catid=4',
'info_dict': {
'id': '691469',
'ext': 'mp4',
'title': 'RAW | Craig MacTavish Full Press Conference',
'description': 'Oilers GM Craig MacTavish addresses the media at Rexall Place on Friday.',
'upload_date': '20141205',
},
'params': {
'skip_download': True, # Requires rtmpdump
}
}, {
'url': 'http://video.nhl.com/videocenter/embed?playlist=836127',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
return self._real_extract_video(video_id)
class NHLNewsIE(NHLBaseInfoExtractor):
IE_NAME = 'nhl.com:news'
IE_DESC = 'NHL news'
_VALID_URL = r'https?://(?:.+?\.)?nhl\.com/(?:ice|club)/news\.html?(?:\?(?:.*?[?&])?)id=(?P<id>[-0-9a-zA-Z]+)'
_TESTS = [{
'url': 'http://www.nhl.com/ice/news.htm?id=750727',
'md5': '4b3d1262e177687a3009937bd9ec0be8',
'info_dict': {
'id': '736722',
'ext': 'mp4',
'title': 'Cal Clutterbuck has been fined $2,000',
'description': 'md5:45fe547d30edab88b23e0dd0ab1ed9e6',
'duration': 37,
'upload_date': '20150128',
},
}, {
# iframe embed
'url': 'http://sabres.nhl.com/club/news.htm?id=780189',
'md5': '9f663d1c006c90ac9fb82777d4294e12',
'info_dict': {
'id': '836127',
'ext': 'mp4',
'title': 'Morning Skate: OTT vs. BUF (9/23/15)',
'description': "Brian Duff chats with Tyler Ennis prior to Buffalo's first preseason home game.",
'duration': 93,
'upload_date': '20150923',
},
}]
def _real_extract(self, url):
news_id = self._match_id(url)
webpage = self._download_webpage(url, news_id)
video_id = self._search_regex(
[r'pVid(\d+)', r"nlid\s*:\s*'(\d+)'",
r'<iframe[^>]+src=["\']https?://video.*?\.nhl\.com/videocenter/embed\?.*\bplaylist=(\d+)'],
webpage, 'video id')
return self._real_extract_video(video_id)
class NHLVideocenterIE(NHLBaseInfoExtractor):
IE_NAME = 'nhl.com:videocenter'
IE_DESC = 'NHL videocenter category'
_VALID_URL = r'https?://video\.(?P<team>[^.]*)\.nhl\.com/videocenter/(console\?[^(id=)]*catid=(?P<catid>[0-9]+)(?![&?]id=).*?)?$'
_TEST = {
'url': 'http://video.canucks.nhl.com/videocenter/console?catid=999',
'info_dict': {
'id': '999',
'title': 'Highlights',
},
'playlist_count': 12,
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
team = mobj.group('team')
webpage = self._download_webpage(url, team)
cat_id = self._search_regex(
[r'var defaultCatId = "(.+?)";',
r'{statusIndex:0,index:0,.*?id:(.*?),'],
webpage, 'category id')
playlist_title = self._html_search_regex(
r'tab0"[^>]*?>(.*?)</td>',
webpage, 'playlist title', flags=re.DOTALL).lower().capitalize()
data = compat_urllib_parse.urlencode({
'cid': cat_id,
# This is the default value
'count': 12,
'ptrs': 3,
'format': 'json',
})
path = '/videocenter/servlets/browse?' + data
request_url = compat_urlparse.urljoin(url, path)
response = self._download_webpage(request_url, playlist_title)
response = self._fix_json(response)
if not response.strip():
self._downloader.report_warning('Got an empty response, trying '
'adding the "newvideos" parameter')
response = self._download_webpage(request_url + '&newvideos=true',
playlist_title)
response = self._fix_json(response)
videos = json.loads(response)
return {
'_type': 'playlist',
'title': playlist_title,
'id': cat_id,
'entries': [self._extract_video(v) for v in videos],
}
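# A small standalone sketch of the video-id normalization performed in
# _real_extract_video above, using the "hlg" id from the test cases:
def _demo_normalize_video_id():
    parts = '20142015,2,299'.split(',')
    normalized = '%s0%s%s-X-h' % (parts[0][:4], parts[1], parts[2].rjust(4, '0'))
    assert normalized == '2014020299-X-h'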
|
andykimpe/chromium-test-npapi
|
refs/heads/master
|
tools/checkbins/checkbins.py
|
77
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that all EXE and DLL files in the provided directory were built
correctly.
In essence it runs a subset of BinScope tests, ensuring that binaries have
/NXCOMPAT, /DYNAMICBASE and /SAFESEH.
"""
import os
import optparse
import sys
# Find /third_party/pefile based on current directory and script path.
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..',
'third_party', 'pefile'))
import pefile
PE_FILE_EXTENSIONS = ['.exe', '.dll']
DYNAMICBASE_FLAG = 0x0040
NXCOMPAT_FLAG = 0x0100
NO_SEH_FLAG = 0x0400
MACHINE_TYPE_AMD64 = 0x8664
# Please do not add your file here without confirming that it indeed doesn't
# require /NXCOMPAT and /DYNAMICBASE. Contact cpu@chromium.org or your local
# Windows guru for advice.
EXCLUDED_FILES = ['chrome_frame_mini_installer.exe',
'mini_installer.exe',
'wow_helper.exe',
'xinput1_3.dll' # Microsoft DirectX redistributable.
]
def IsPEFile(path):
return (os.path.isfile(path) and
os.path.splitext(path)[1].lower() in PE_FILE_EXTENSIONS and
os.path.basename(path) not in EXCLUDED_FILES)
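# A tiny standalone illustration (not used by main) of the bitmask tests
# applied below: each mitigation is detected by AND-ing the
# DllCharacteristics field with its flag. The sample value is made up.
def _demo_flag_check():
    characteristics = DYNAMICBASE_FLAG | NXCOMPAT_FLAG
    assert characteristics & DYNAMICBASE_FLAG
    assert characteristics & NXCOMPAT_FLAG
    assert not (characteristics & NO_SEH_FLAG)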
def main(options, args):
directory = args[0]
pe_total = 0
pe_passed = 0
for file in os.listdir(directory):
path = os.path.abspath(os.path.join(directory, file))
if not IsPEFile(path):
continue
pe = pefile.PE(path, fast_load=True)
pe.parse_data_directories(directories=[
pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG']])
pe_total = pe_total + 1
success = True
# Check for /DYNAMICBASE.
if pe.OPTIONAL_HEADER.DllCharacteristics & DYNAMICBASE_FLAG:
if options.verbose:
print "Checking %s for /DYNAMICBASE... PASS" % path
else:
success = False
print "Checking %s for /DYNAMICBASE... FAIL" % path
# Check for /NXCOMPAT.
if pe.OPTIONAL_HEADER.DllCharacteristics & NXCOMPAT_FLAG:
if options.verbose:
print "Checking %s for /NXCOMPAT... PASS" % path
else:
success = False
print "Checking %s for /NXCOMPAT... FAIL" % path
# Check for /SAFESEH. Binaries should meet one of the following
# criteria:
# 1) Have no SEH table as indicated by the DLL characteristics
# 2) Have a LOAD_CONFIG section containing a valid SEH table
# 3) Be a 64-bit binary, in which case /SAFESEH isn't required
#
# Refer to the following MSDN article for more information:
# http://msdn.microsoft.com/en-us/library/9a89h429.aspx
if (pe.OPTIONAL_HEADER.DllCharacteristics & NO_SEH_FLAG or
(hasattr(pe, "DIRECTORY_ENTRY_LOAD_CONFIG") and
pe.DIRECTORY_ENTRY_LOAD_CONFIG.struct.SEHandlerCount > 0 and
pe.DIRECTORY_ENTRY_LOAD_CONFIG.struct.SEHandlerTable != 0) or
pe.FILE_HEADER.Machine == MACHINE_TYPE_AMD64):
if options.verbose:
print "Checking %s for /SAFESEH... PASS" % path
else:
success = False
print "Checking %s for /SAFESEH... FAIL" % path
# ASLR is weakened on Windows 64-bit when the ImageBase is below 4GB
# (because the loader will never rebase the image above 4GB).
if pe.FILE_HEADER.Machine == MACHINE_TYPE_AMD64:
if pe.OPTIONAL_HEADER.ImageBase <= 0xFFFFFFFF:
print("Checking %s ImageBase (0x%X < 4GB)... FAIL" %
(path, pe.OPTIONAL_HEADER.ImageBase))
success = False
elif options.verbose:
print("Checking %s ImageBase (0x%X > 4GB)... PASS" %
(path, pe.OPTIONAL_HEADER.ImageBase))
# Update tally.
if success:
pe_passed = pe_passed + 1
print "Result: %d files found, %d files passed" % (pe_total, pe_passed)
if pe_passed != pe_total:
sys.exit(1)
if __name__ == '__main__':
usage = "Usage: %prog [options] DIRECTORY"
option_parser = optparse.OptionParser(usage=usage)
option_parser.add_option("-v", "--verbose", action="store_true",
default=False, help="Print debug logging")
options, args = option_parser.parse_args()
if not args:
option_parser.print_help()
sys.exit(0)
main(options, args)
|
jazkarta/edx-platform
|
refs/heads/master
|
common/djangoapps/third_party_auth/tasks.py
|
81
|
# -*- coding: utf-8 -*-
"""
Code to manage fetching and storing the metadata of IdPs.
"""
#pylint: disable=no-member
from celery.task import task # pylint: disable=import-error,no-name-in-module
import datetime
import dateutil.parser
import logging
from lxml import etree
import requests
from onelogin.saml2.utils import OneLogin_Saml2_Utils
from third_party_auth.models import SAMLConfiguration, SAMLProviderConfig, SAMLProviderData
log = logging.getLogger(__name__)
SAML_XML_NS = 'urn:oasis:names:tc:SAML:2.0:metadata' # The SAML Metadata XML namespace
class MetadataParseError(Exception):
""" An error occurred while parsing the SAML metadata from an IdP """
pass
@task(name='third_party_auth.fetch_saml_metadata')
def fetch_saml_metadata():
"""
Fetch and store/update the metadata of all IdPs
This task should be run on a daily basis.
It's OK to run this whether or not SAML is enabled.
Return value:
tuple(num_changed, num_failed, num_total)
num_changed: Number of providers that are either new or whose metadata has changed
num_failed: Number of providers that could not be updated
num_total: Total number of providers whose metadata was fetched
"""
if not SAMLConfiguration.is_enabled():
return (0, 0, 0) # Nothing to do until SAML is enabled.
num_changed, num_failed = 0, 0
# First make a list of all the metadata XML URLs:
url_map = {}
for idp_slug in SAMLProviderConfig.key_values('idp_slug', flat=True):
config = SAMLProviderConfig.current(idp_slug)
if not config.enabled:
continue
url = config.metadata_source
if url not in url_map:
url_map[url] = []
if config.entity_id not in url_map[url]:
url_map[url].append(config.entity_id)
# Now fetch the metadata:
for url, entity_ids in url_map.items():
try:
log.info("Fetching %s", url)
if not url.lower().startswith('https'):
log.warning("This SAML metadata URL is not secure! It should use HTTPS. (%s)", url)
response = requests.get(url, verify=True) # May raise HTTPError or SSLError or ConnectionError
response.raise_for_status() # May raise an HTTPError
try:
parser = etree.XMLParser(remove_comments=True)
xml = etree.fromstring(response.content, parser)
except etree.XMLSyntaxError:
raise
# TODO: Can use OneLogin_Saml2_Utils to validate signed XML if anyone is using that
for entity_id in entity_ids:
log.info(u"Processing IdP with entityID %s", entity_id)
public_key, sso_url, expires_at = _parse_metadata_xml(xml, entity_id)
changed = _update_data(entity_id, public_key, sso_url, expires_at)
if changed:
log.info(u"→ Created new record for SAMLProviderData")
num_changed += 1
else:
log.info(u"→ Updated existing SAMLProviderData. Nothing has changed.")
except Exception as err: # pylint: disable=broad-except
log.exception(err.message)
num_failed += 1
return (num_changed, num_failed, len(url_map))
def _parse_metadata_xml(xml, entity_id):
"""
Given an XML document containing SAML 2.0 metadata, parse it and return a tuple of
(public_key, sso_url, expires_at) for the specified entityID.
Raises MetadataParseError if anything is wrong.
"""
if xml.tag == etree.QName(SAML_XML_NS, 'EntityDescriptor'):
entity_desc = xml
else:
if xml.tag != etree.QName(SAML_XML_NS, 'EntitiesDescriptor'):
raise MetadataParseError("Expected root element to be <EntitiesDescriptor>, not {}".format(xml.tag))
entity_desc = xml.find(
".//{}[@entityID='{}']".format(etree.QName(SAML_XML_NS, 'EntityDescriptor'), entity_id)
)
if not entity_desc:
raise MetadataParseError("Can't find EntityDescriptor for entityID {}".format(entity_id))
expires_at = None
if "validUntil" in xml.attrib:
expires_at = dateutil.parser.parse(xml.attrib["validUntil"])
if "cacheDuration" in xml.attrib:
cache_expires = OneLogin_Saml2_Utils.parse_duration(xml.attrib["cacheDuration"])
if expires_at is None or cache_expires < expires_at:
expires_at = cache_expires
sso_desc = entity_desc.find(etree.QName(SAML_XML_NS, "IDPSSODescriptor"))
if not sso_desc:
raise MetadataParseError("IDPSSODescriptor missing")
if 'urn:oasis:names:tc:SAML:2.0:protocol' not in sso_desc.get("protocolSupportEnumeration"):
raise MetadataParseError("This IdP does not support SAML 2.0")
# Now we just need to get the public_key and sso_url
public_key = sso_desc.findtext("./{}//{}".format(
etree.QName(SAML_XML_NS, "KeyDescriptor"), "{http://www.w3.org/2000/09/xmldsig#}X509Certificate"
))
if not public_key:
raise MetadataParseError("Public Key missing. Expected an <X509Certificate>")
public_key = public_key.replace(" ", "")
binding_elements = sso_desc.iterfind("./{}".format(etree.QName(SAML_XML_NS, "SingleSignOnService")))
sso_bindings = {element.get('Binding'): element.get('Location') for element in binding_elements}
try:
# The only binding supported by python-saml and python-social-auth is HTTP-Redirect:
sso_url = sso_bindings['urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect']
except KeyError:
raise MetadataParseError("Unable to find SSO URL with HTTP-Redirect binding.")
return public_key, sso_url, expires_at
def _update_data(entity_id, public_key, sso_url, expires_at):
"""
Update/Create the SAMLProviderData for the given entity ID.
Return value:
False if nothing has changed and existing data's "fetched at" timestamp is just updated.
True if a new record was created. (Either this is a new provider or something changed.)
"""
data_obj = SAMLProviderData.current(entity_id)
fetched_at = datetime.datetime.now()
if data_obj and (data_obj.public_key == public_key and data_obj.sso_url == sso_url):
data_obj.expires_at = expires_at
data_obj.fetched_at = fetched_at
data_obj.save()
return False
else:
SAMLProviderData.objects.create(
entity_id=entity_id,
fetched_at=fetched_at,
expires_at=expires_at,
sso_url=sso_url,
public_key=public_key,
)
return True
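# A minimal, self-contained sketch of the namespaced lookups performed by
# _parse_metadata_xml above, run against a tiny hand-written metadata
# document (the entityID and URLs below are made up for illustration and
# this helper is not used by the task itself):
def _demo_parse_sso_url():
    sample = (
        '<EntityDescriptor xmlns="urn:oasis:names:tc:SAML:2.0:metadata"'
        ' entityID="https://idp.example.com/saml">'
        '<IDPSSODescriptor protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol">'
        '<SingleSignOnService'
        ' Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect"'
        ' Location="https://idp.example.com/sso"/>'
        '</IDPSSODescriptor>'
        '</EntityDescriptor>'
    )
    xml = etree.fromstring(sample)
    sso_desc = xml.find(etree.QName(SAML_XML_NS, "IDPSSODescriptor"))
    bindings = {
        element.get('Binding'): element.get('Location')
        for element in sso_desc.iterfind("./{}".format(etree.QName(SAML_XML_NS, "SingleSignOnService")))
    }
    return bindings['urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect']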
|
sbidoul/buildbot
|
refs/heads/master
|
master/buildbot/buildslave/__init__.py
|
11
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
# This module is left for backward compatibility of old-named worker API.
# It should never be imported by Buildbot.
from buildbot.worker import AbstractLatentWorker as _AbstractLatentWorker
from buildbot.worker import AbstractWorker as _AbstractWorker
from buildbot.worker import Worker as _Worker
from buildbot.worker_transition import deprecatedWorkerModuleAttribute
from buildbot.worker_transition import reportDeprecatedWorkerModuleUsage
reportDeprecatedWorkerModuleUsage(
"'{old}' module is deprecated, use "
"'buildbot.worker' module instead".format(old=__name__))
deprecatedWorkerModuleAttribute(locals(), _AbstractWorker,
compat_name="AbstractBuildSlave",
new_name="AbstractWorker")
deprecatedWorkerModuleAttribute(locals(), _Worker,
compat_name="BuildSlave",
new_name="Worker")
deprecatedWorkerModuleAttribute(locals(), _AbstractLatentWorker,
compat_name="AbstractLatentBuildSlave",
new_name="AbstractLatentWorker")
|
achang97/YouTunes
|
refs/heads/master
|
lib/python2.7/site-packages/setuptools/namespaces.py
|
196
|
import os
from distutils import log
import itertools
from setuptools.extern.six.moves import map
flatten = itertools.chain.from_iterable
class Installer:
nspkg_ext = '-nspkg.pth'
def install_namespaces(self):
nsp = self._get_all_ns_packages()
if not nsp:
return
filename, ext = os.path.splitext(self._get_target())
filename += self.nspkg_ext
self.outputs.append(filename)
log.info("Installing %s", filename)
lines = map(self._gen_nspkg_line, nsp)
if self.dry_run:
# always generate the lines, even in dry run
list(lines)
return
with open(filename, 'wt') as f:
f.writelines(lines)
def uninstall_namespaces(self):
filename, ext = os.path.splitext(self._get_target())
filename += self.nspkg_ext
if not os.path.exists(filename):
return
log.info("Removing %s", filename)
os.remove(filename)
def _get_target(self):
return self.target
_nspkg_tmpl = (
"import sys, types, os",
"has_mfs = sys.version_info > (3, 5)",
"p = os.path.join(%(root)s, *%(pth)r)",
"importlib = has_mfs and __import__('importlib.util')",
"has_mfs and __import__('importlib.machinery')",
"m = has_mfs and "
"sys.modules.setdefault(%(pkg)r, "
"importlib.util.module_from_spec("
"importlib.machinery.PathFinder.find_spec(%(pkg)r, "
"[os.path.dirname(p)])))",
"m = m or "
"sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))",
"mp = (m or []) and m.__dict__.setdefault('__path__',[])",
"(p not in mp) and mp.append(p)",
)
"lines for the namespace installer"
_nspkg_tmpl_multi = (
'm and setattr(sys.modules[%(parent)r], %(child)r, m)',
)
"additional line(s) when a parent package is indicated"
def _get_root(self):
return "sys._getframe(1).f_locals['sitedir']"
def _gen_nspkg_line(self, pkg):
# ensure pkg is not a unicode string under Python 2.7
pkg = str(pkg)
pth = tuple(pkg.split('.'))
root = self._get_root()
tmpl_lines = self._nspkg_tmpl
parent, sep, child = pkg.rpartition('.')
if parent:
tmpl_lines += self._nspkg_tmpl_multi
return ';'.join(tmpl_lines) % locals() + '\n'
def _get_all_ns_packages(self):
"""Return sorted list of all package namespaces"""
pkgs = self.distribution.namespace_packages or []
return sorted(flatten(map(self._pkg_names, pkgs)))
@staticmethod
def _pkg_names(pkg):
"""
Given a namespace package, yield the components of that
package.
>>> names = Installer._pkg_names('a.b.c')
>>> set(names) == set(['a', 'a.b', 'a.b.c'])
True
"""
parts = pkg.split('.')
while parts:
yield '.'.join(parts)
parts.pop()
class DevelopInstaller(Installer):
def _get_root(self):
return repr(str(self.egg_path))
def _get_target(self):
return self.egg_link
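# A small standalone sketch of the line-generation idea used by
# _gen_nspkg_line above: a tuple of statement templates is joined with ';'
# and filled in from a mapping (the template and values here are
# simplified stand-ins, not the real -nspkg.pth contents):
def _demo_nspkg_line():
    tmpl_lines = (
        "import sys, types",
        "m = sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))",
    )
    return ';'.join(tmpl_lines) % dict(pkg='a.b') + '\n'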
|
jbclements/rust
|
refs/heads/master
|
src/etc/lldb_batchmode.py
|
9
|
# Copyright 2014 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
# This script allows using LLDB in a way similar to GDB's batch mode. That is, given a text file
# containing LLDB commands (one command per line), this script will execute the commands one after
# the other.
# LLDB also has the -s and -S command-line options, which also execute a list of commands from a
# text file. However, those commands are executed immediately: a command following a `run` or
# `continue` command will be executed right after the `run` or `continue`, without waiting for the
# next breakpoint to be hit. Thus a command sequence like the following will not yield reliable
# results:
#
# break 11
# run
# print x
#
# Most of the time the `print` command will be executed while the program is still running and will
# thus fail. Using this Python script, the above will work as expected.
from __future__ import print_function
import lldb
import os
import sys
import threading
import re
import atexit
# Set this to True for additional output
DEBUG_OUTPUT = False
def print_debug(s):
"Print something if DEBUG_OUTPUT is True"
global DEBUG_OUTPUT
if DEBUG_OUTPUT:
print("DEBUG: " + str(s))
def normalize_whitespace(s):
"Replace newlines, tabs, multiple spaces, etc with exactly one space"
return re.sub(r"\s+", " ", s)
# This callback is registered with every breakpoint and makes sure that the frame containing the
# breakpoint location is selected
def breakpoint_callback(frame, bp_loc, dict):
"Called whenever a breakpoint is hit"
print("Hit breakpoint " + str(bp_loc))
# Select the frame and the thread containing it
frame.thread.process.SetSelectedThread(frame.thread)
frame.thread.SetSelectedFrame(frame.idx)
# Returning True means that we actually want to stop at this breakpoint
return True
# This is a list of breakpoints that are not registered with the breakpoint callback. The list is
# populated by the breakpoint listener and checked/emptied whenever a command has been executed
new_breakpoints = []
# This set contains all breakpoint ids that have already been registered with a callback, and is
# used to avoid hooking callbacks into breakpoints more than once
registered_breakpoints = set()
def execute_command(command_interpreter, command):
"Executes a single CLI command"
global new_breakpoints
global registered_breakpoints
res = lldb.SBCommandReturnObject()
print(command)
command_interpreter.HandleCommand(command, res)
if res.Succeeded():
if res.HasResult():
print(normalize_whitespace(res.GetOutput()), end = '\n')
# If the command introduced any breakpoints, make sure to register them with the breakpoint
# callback
while len(new_breakpoints) > 0:
res.Clear()
breakpoint_id = new_breakpoints.pop()
if breakpoint_id in registered_breakpoints:
print_debug("breakpoint with id %s is already registered. Ignoring." % str(breakpoint_id))
else:
print_debug("registering breakpoint callback, id = " + str(breakpoint_id))
callback_command = "breakpoint command add -F breakpoint_callback " + str(breakpoint_id)
command_interpreter.HandleCommand(callback_command, res)
if res.Succeeded():
print_debug("successfully registered breakpoint callback, id = " + str(breakpoint_id))
registered_breakpoints.add(breakpoint_id)
else:
print("Error while trying to register breakpoint callback, id = " + str(breakpoint_id))
else:
print(res.GetError())
def start_breakpoint_listener(target):
"Listens for breakpoints being added and adds new ones to the callback registration list"
listener = lldb.SBListener("breakpoint listener")
def listen():
event = lldb.SBEvent()
try:
while True:
if listener.WaitForEvent(120, event):
if lldb.SBBreakpoint.EventIsBreakpointEvent(event) and \
lldb.SBBreakpoint.GetBreakpointEventTypeFromEvent(event) == \
lldb.eBreakpointEventTypeAdded:
global new_breakpoints
breakpoint = lldb.SBBreakpoint.GetBreakpointFromEvent(event)
print_debug("breakpoint added, id = " + str(breakpoint.id))
new_breakpoints.append(breakpoint.id)
except:
print_debug("breakpoint listener shutting down")
# Start the listener and let it run as a daemon
listener_thread = threading.Thread(target = listen)
listener_thread.daemon = True
listener_thread.start()
# Register the listener with the target
target.GetBroadcaster().AddListener(listener, lldb.SBTarget.eBroadcastBitBreakpointChanged)
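# A minimal standalone sketch (independent of LLDB) of the daemon-listener
# pattern used by start_breakpoint_listener above; a plain Queue stands in
# for lldb.SBListener and the event string is made up:
def _demo_daemon_listener():
    try:
        import Queue as queue_mod  # Python 2
    except ImportError:
        import queue as queue_mod  # Python 3
    events = queue_mod.Queue()
    def listen():
        while True:
            print("got event: " + str(events.get()))
    listener_thread = threading.Thread(target=listen)
    listener_thread.daemon = True
    listener_thread.start()
    events.put("breakpoint added")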
####################################################################################################
# ~main
####################################################################################################
if len(sys.argv) != 3:
print("usage: python lldb_batchmode.py target-path script-path")
sys.exit(1)
target_path = sys.argv[1]
script_path = sys.argv[2]
# Create a new debugger instance
debugger = lldb.SBDebugger.Create()
# When we step or continue, don't return from the function until the process
# stops. We do this by setting the async mode to false.
debugger.SetAsync(False)
# Create a target from a file and arch
print("Creating a target for '%s'" % target_path)
target = debugger.CreateTargetWithFileAndArch(target_path, lldb.LLDB_ARCH_DEFAULT)
if not target:
print("Could not create debugging target '" + target_path + "'. Aborting.", file=sys.stderr)
sys.exit(1)
# Register the breakpoint callback for every breakpoint
start_breakpoint_listener(target)
command_interpreter = debugger.GetCommandInterpreter()
try:
script_file = open(script_path, 'r')
for line in script_file:
command = line.strip()
if command != '':
execute_command(command_interpreter, command)
except IOError as e:
print("Could not read debugging script '%s'." % script_path, file = sys.stderr)
print(e, file = sys.stderr)
print("Aborting.", file = sys.stderr)
sys.exit(1)
finally:
script_file.close()
|
pombredanne/django-rest-framework-jsonapi
|
refs/heads/master
|
tests/views.py
|
1
|
from rest_framework import viewsets, permissions
from rest_framework_jsonapi.pagination import (
PageNumberPagination, LimitOffsetPagination, CursorPagination)
from rest_framework.decorators import api_view, throttle_classes
from rest_framework.throttling import AnonRateThrottle
from rest_framework.exceptions import ValidationError
from rest_framework.response import Response
from tests.models import Article, Person, Comment, TestFormattingWithABBR
from tests.serializers import (
ArticleSerializer, PersonSerializer, CommentSerializer,
ImproperlyConfiguredReadOnlyAuthorCommentSerializer,
ReadOnlyAuthorCommentSerializer, OnlyCommentSerializer,
TestFormattingWithABBRSerializer)
class DenyPermission(permissions.BasePermission):
def has_permission(self, request, view):
return False
class Articles(viewsets.ModelViewSet):
queryset = Article.objects.all()
serializer_class = ArticleSerializer
pagination_class = PageNumberPagination
class People(viewsets.ModelViewSet):
queryset = Person.objects.all()
serializer_class = PersonSerializer
pagination_class = LimitOffsetPagination
class AuthenticatedPeople(viewsets.ModelViewSet):
queryset = Person.objects.all()
serializer_class = PersonSerializer
permission_classes = (DenyPermission,)
class Comments(viewsets.ModelViewSet):
queryset = Comment.objects.all()
serializer_class = CommentSerializer
pagination_class = CursorPagination
class OnlyComments(viewsets.ModelViewSet):
queryset = Comment.objects.all()
serializer_class = OnlyCommentSerializer
class ImproperlyConfiguredReadOnlyAuthorComments(viewsets.ModelViewSet):
queryset = Comment.objects.all()
serializer_class = ImproperlyConfiguredReadOnlyAuthorCommentSerializer
class ReadOnlyAuthorComments(viewsets.ModelViewSet):
queryset = Comment.objects.all()
serializer_class = ReadOnlyAuthorCommentSerializer
class TestFormattingWithABBRs(viewsets.ModelViewSet):
queryset = TestFormattingWithABBR.objects.all()
serializer_class = TestFormattingWithABBRSerializer
class AnonImmediateRateThrottle(AnonRateThrottle):
rate = '0/sec'
scope = 'seconds'
@api_view()
@throttle_classes([AnonImmediateRateThrottle])
def throttled_view(request):
return Response("Throttled")
@api_view()
def validation_error_view(request):
raise ValidationError("Validation error")
@api_view()
def errored_view(request):
raise NotImplementedError("Errored view")
|
ahb0327/intellij-community
|
refs/heads/master
|
python/testData/refactoring/move/innerImports/after/src/a.py
|
45382
| |
msingh172/youtube-dl
|
refs/heads/master
|
youtube_dl/downloader/rtsp.py
|
119
|
from __future__ import unicode_literals
import os
import subprocess
from .common import FileDownloader
from ..utils import (
check_executable,
encodeFilename,
)
class RtspFD(FileDownloader):
def real_download(self, filename, info_dict):
url = info_dict['url']
self.report_destination(filename)
tmpfilename = self.temp_name(filename)
if check_executable('mplayer', ['-h']):
args = [
'mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy',
'-dumpstream', '-dumpfile', tmpfilename, url]
elif check_executable('mpv', ['-h']):
args = [
'mpv', '-really-quiet', '--vo=null', '--stream-dump=' + tmpfilename, url]
else:
self.report_error('MMS or RTSP download detected but neither "mplayer" nor "mpv" could be run. Please install one of them.')
return False
retval = subprocess.call(args)
if retval == 0:
fsize = os.path.getsize(encodeFilename(tmpfilename))
self.to_screen('\r[%s] %s bytes' % (args[0], fsize))
self.try_rename(tmpfilename, filename)
self._hook_progress({
'downloaded_bytes': fsize,
'total_bytes': fsize,
'filename': filename,
'status': 'finished',
})
return True
else:
self.to_stderr('\n')
self.report_error('%s exited with code %d' % (args[0], retval))
return False
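# A minimal standalone sketch of the probing idea behind check_executable
# as used above: try to launch the program with a harmless argument and
# treat OSError as "not installed" (a simplified stand-in, not
# youtube-dl's actual helper):
def _can_run(prog, args):
    try:
        handle = subprocess.Popen(
            [prog] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        handle.communicate()
    except OSError:
        return False
    return True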
|
dezelin/virtualbox
|
refs/heads/master
|
src/libs/libxml2-2.6.31/python/tests/pushSAX.py
|
87
|
#!/usr/bin/python -u
import sys
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
log = ""
class callback:
def startDocument(self):
global log
log = log + "startDocument:"
def endDocument(self):
global log
log = log + "endDocument:"
def startElement(self, tag, attrs):
global log
log = log + "startElement %s %s:" % (tag, attrs)
def endElement(self, tag):
global log
log = log + "endElement %s:" % (tag)
def characters(self, data):
global log
log = log + "characters: %s:" % (data)
def warning(self, msg):
global log
log = log + "warning: %s:" % (msg)
def error(self, msg):
global log
log = log + "error: %s:" % (msg)
def fatalError(self, msg):
global log
log = log + "fatalError: %s:" % (msg)
handler = callback()
ctxt = libxml2.createPushParser(handler, "<foo", 4, "test.xml")
chunk = " url='tst'>b"
ctxt.parseChunk(chunk, len(chunk), 0)
chunk = "ar</foo>"
ctxt.parseChunk(chunk, len(chunk), 1)
ctxt=None
reference = "startDocument:startElement foo {'url': 'tst'}:characters: bar:endElement foo:endDocument:"
if log != reference:
print "Error got: %s" % log
print "Exprected: %s" % reference
sys.exit(1)
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print "OK"
else:
print "Memory leak %d bytes" % (libxml2.debugMemory(1))
libxml2.dumpMemory()
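# For comparison, a minimal sketch of the same push-parsing pattern using
# the standard library's incremental SAX parser instead of libxml2
# (illustration only; not invoked by this test):
def _demo_stdlib_push_parse():
    import xml.sax
    seen = []
    class Handler(xml.sax.ContentHandler):
        def startElement(self, name, attrs):
            seen.append("startElement %s %s" % (name, dict(attrs)))
        def characters(self, data):
            seen.append("characters: %s" % data)
        def endElement(self, name):
            seen.append("endElement %s" % name)
    parser = xml.sax.make_parser()
    parser.setContentHandler(Handler())
    for chunk in ("<foo", " url='tst'>b", "ar</foo>"):
        parser.feed(chunk)
    parser.close()
    return seen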
|
msimacek/samba
|
refs/heads/master
|
third_party/dnspython/dns/edns.py
|
47
|
# Copyright (C) 2009, 2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""EDNS Options"""
NSID = 3
class Option(object):
"""Base class for all EDNS option types.
"""
def __init__(self, otype):
"""Initialize an option.
@param otype: The option type
@type otype: int
"""
self.otype = otype
def to_wire(self, file):
"""Convert an option to wire format.
"""
raise NotImplementedError
def from_wire(cls, otype, wire, current, olen):
"""Build an EDNS option object from wire format
@param otype: The option type
@type otype: int
@param wire: The wire-format message
@type wire: string
@param current: The offset in wire of the beginning of the rdata.
@type current: int
@param olen: The length of the wire-format option data
@type olen: int
@rtype: dns.edns.Option instance"""
raise NotImplementedError
from_wire = classmethod(from_wire)
def _cmp(self, other):
"""Compare an ENDS option with another option of the same type.
Return < 0 if self < other, 0 if self == other, and > 0 if self > other.
"""
raise NotImplementedError
def __eq__(self, other):
if not isinstance(other, Option):
return False
if self.otype != other.otype:
return False
return self._cmp(other) == 0
def __ne__(self, other):
    # Inequality is the logical negation of __eq__: options of a
    # different type (or non-Option values) compare unequal.
    if not isinstance(other, Option):
        return True
    if self.otype != other.otype:
        return True
    return self._cmp(other) != 0
def __lt__(self, other):
if not isinstance(other, Option) or \
self.otype != other.otype:
return NotImplemented
return self._cmp(other) < 0
def __le__(self, other):
if not isinstance(other, Option) or \
self.otype != other.otype:
return NotImplemented
return self._cmp(other) <= 0
def __ge__(self, other):
if not isinstance(other, Option) or \
self.otype != other.otype:
return NotImplemented
return self._cmp(other) >= 0
def __gt__(self, other):
if not isinstance(other, Option) or \
self.otype != other.otype:
return NotImplemented
return self._cmp(other) > 0
class GenericOption(Option):
"""Generate Rdata Class
This class is used for EDNS option types for which we have no better
implementation.
"""
def __init__(self, otype, data):
super(GenericOption, self).__init__(otype)
self.data = data
def to_wire(self, file):
file.write(self.data)
def from_wire(cls, otype, wire, current, olen):
return cls(otype, wire[current : current + olen])
from_wire = classmethod(from_wire)
def _cmp(self, other):
return cmp(self.data, other.data)
_type_to_class = {
}
def get_option_class(otype):
cls = _type_to_class.get(otype)
if cls is None:
cls = GenericOption
return cls
def option_from_wire(otype, wire, current, olen):
"""Build an EDNS option object from wire format
@param otype: The option type
@type otype: int
@param wire: The wire-format message
@type wire: string
@param current: The offset in wire of the beginning of the rdata.
@type current: int
@param olen: The length of the wire-format option data
@type olen: int
@rtype: dns.edns.Option instance"""
cls = get_option_class(otype)
return cls.from_wire(otype, wire, current, olen)
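# A small usage sketch of the classes above: a GenericOption is written to
# a file-like object and rebuilt from the resulting "wire" bytes. Option
# code 65001 is an arbitrary experimental value chosen for illustration,
# and the payload is a byte string (this is a Python 2 era module):
def _demo_option_round_trip():
    import io
    opt = GenericOption(65001, 'hello')
    buf = io.BytesIO()
    opt.to_wire(buf)
    wire = buf.getvalue()
    return option_from_wire(65001, wire, 0, len(wire))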
|
cloudera/hue
|
refs/heads/master
|
desktop/core/ext-py/pytest-django-3.10.0/pytest_django/asserts.py
|
2
|
"""
Dynamically load all Django assertion cases and expose them for importing.
"""
from functools import wraps
from django.test import (
TestCase, SimpleTestCase,
LiveServerTestCase, TransactionTestCase
)
test_case = TestCase('run')
def _wrapper(name):
func = getattr(test_case, name)
@wraps(func)
def assertion_func(*args, **kwargs):
return func(*args, **kwargs)
return assertion_func
__all__ = []
assertions_names = set()
assertions_names.update(
set(attr for attr in vars(TestCase) if attr.startswith('assert')),
set(attr for attr in vars(SimpleTestCase) if attr.startswith('assert')),
set(attr for attr in vars(LiveServerTestCase) if attr.startswith('assert')),
set(attr for attr in vars(TransactionTestCase) if attr.startswith('assert')),
)
for assert_func in assertions_names:
globals()[assert_func] = _wrapper(assert_func)
__all__.append(assert_func)
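# A minimal standalone sketch of the re-export pattern implemented above:
# bound methods of a throwaway instance are published under their own
# names (the Example class is illustrative, not part of pytest-django):
def _demo_reexport():
    class Example(object):
        def assert_positive(self, x):
            assert x > 0
    instance = Example()
    namespace = {}
    for attr in vars(Example):
        if attr.startswith('assert'):
            namespace[attr] = getattr(instance, attr)
    namespace['assert_positive'](1)
    return sorted(namespace)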
|
Chetwyn/git-cola
|
refs/heads/master
|
cola/widgets/compare.py
|
7
|
"""Provides dialogs for comparing branches and commits."""
from __future__ import division, absolute_import, unicode_literals
from PyQt4 import QtGui
from PyQt4.QtCore import Qt
from PyQt4.QtCore import SIGNAL
from cola import qtutils
from cola import difftool
from cola import gitcmds
from cola import icons
from cola.i18n import N_
from cola.qtutils import connect_button
from cola.widgets import defs
from cola.widgets import standard
from cola.compat import ustr
class FileItem(QtGui.QTreeWidgetItem):
def __init__(self, path, icon):
QtGui.QTreeWidgetItem.__init__(self, [path])
self.path = path
self.setIcon(0, icon)
def compare_branches():
"""Launches a dialog for comparing a pair of branches"""
view = CompareBranchesDialog(qtutils.active_window())
view.show()
return view
class CompareBranchesDialog(standard.Dialog):
def __init__(self, parent):
standard.Dialog.__init__(self, parent=parent)
self.BRANCH_POINT = N_('*** Branch Point ***')
self.SANDBOX = N_('*** Sandbox ***')
self.LOCAL = N_('Local')
self.setWindowTitle(N_('Branch Diff Viewer'))
self.remote_branches = gitcmds.branch_list(remote=True)
self.local_branches = gitcmds.branch_list(remote=False)
self.top_widget = QtGui.QWidget()
self.bottom_widget = QtGui.QWidget()
self.left_combo = QtGui.QComboBox()
self.left_combo.addItem(N_('Local'))
self.left_combo.addItem(N_('Remote'))
self.left_combo.setCurrentIndex(0)
self.right_combo = QtGui.QComboBox()
self.right_combo.addItem(N_('Local'))
self.right_combo.addItem(N_('Remote'))
self.right_combo.setCurrentIndex(1)
self.left_list = QtGui.QListWidget()
self.right_list = QtGui.QListWidget()
self.button_spacer = QtGui.QSpacerItem(1, 1,
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Minimum)
self.button_compare = qtutils.create_button(text=N_('Compare'),
icon=icons.diff())
self.button_close = qtutils.close_button()
self.diff_files = standard.TreeWidget()
self.diff_files.headerItem().setText(0, N_('File Differences'))
self.top_grid_layout = qtutils.grid(
defs.no_margin, defs.spacing,
(self.left_combo, 0, 0, 1, 1),
(self.left_list, 1, 0, 1, 1),
(self.right_combo, 0, 1, 1, 1),
(self.right_list, 1, 1, 1, 1))
self.top_widget.setLayout(self.top_grid_layout)
self.bottom_grid_layout = qtutils.grid(
defs.no_margin, defs.spacing,
(self.diff_files, 0, 0, 1, 4),
(self.button_spacer, 1, 1, 1, 1),
(self.button_compare, 1, 2, 1, 1),
(self.button_close, 1, 3, 1, 1))
self.bottom_widget.setLayout(self.bottom_grid_layout)
self.splitter = qtutils.splitter(Qt.Vertical,
self.top_widget, self.bottom_widget)
self.main_layout = qtutils.vbox(defs.margin, defs.spacing, self.splitter)
self.setLayout(self.main_layout)
self.resize(658, 350)
connect_button(self.button_close, self.accept)
connect_button(self.button_compare, self.compare)
self.connect(self.diff_files,
SIGNAL('itemDoubleClicked(QTreeWidgetItem*,int)'),
self.compare)
self.connect(self.left_combo,
SIGNAL('currentIndexChanged(int)'),
lambda x: self.update_combo_boxes(left=True))
self.connect(self.right_combo,
SIGNAL('currentIndexChanged(int)'),
lambda x: self.update_combo_boxes(left=False))
self.connect(self.left_list,
SIGNAL('itemSelectionChanged()'), self.update_diff_files)
self.connect(self.right_list,
SIGNAL('itemSelectionChanged()'), self.update_diff_files)
self.update_combo_boxes(left=True)
self.update_combo_boxes(left=False)
# Pre-select the 0th elements
item = self.left_list.item(0)
if item:
self.left_list.setCurrentItem(item)
self.left_list.setItemSelected(item, True)
item = self.right_list.item(0)
if item:
self.right_list.setCurrentItem(item)
self.right_list.setItemSelected(item, True)
def selection(self):
left_item = self.left_list.currentItem()
if left_item and left_item.isSelected():
left_item = ustr(left_item.text())
else:
left_item = None
right_item = self.right_list.currentItem()
if right_item and right_item.isSelected():
right_item = ustr(right_item.text())
else:
right_item = None
return (left_item, right_item)
def update_diff_files(self, *rest):
"""Updates the list of files whenever the selection changes"""
# Left and Right refer to the comparison pair (l,r)
left_item, right_item = self.selection()
if (not left_item or not right_item or
left_item == right_item):
self.set_diff_files([])
return
left_item = self.remote_ref(left_item)
right_item = self.remote_ref(right_item)
# If either side of the selection is the sandbox then we
# generate the same diff, regardless of order. This means we
# don't support reverse diffs against the sandbox aka worktree.
if self.SANDBOX in (left_item, right_item):
self.use_sandbox = True
if left_item == self.SANDBOX:
self.diff_arg = (right_item,)
else:
self.diff_arg = (left_item,)
else:
self.diff_arg = (left_item, right_item)
self.use_sandbox = False
# start and end as in 'git diff start end'
self.start = left_item
self.end = right_item
if len(self.diff_arg) == 1:
files = gitcmds.diff_index_filenames(self.diff_arg[0])
else:
files = gitcmds.diff_filenames(*self.diff_arg)
self.set_diff_files(files)
def set_diff_files(self, files):
mk = FileItem
icon = icons.file_code()
self.diff_files.clear()
self.diff_files.addTopLevelItems([mk(f, icon) for f in files])
def remote_ref(self, branch):
"""Returns the remote ref for 'git diff [local] [remote]'
"""
if branch == self.BRANCH_POINT:
# Compare against the branch point so find the merge-base
branch = gitcmds.current_branch()
tracked_branch = gitcmds.tracked_branch()
if tracked_branch:
return gitcmds.merge_base(branch, tracked_branch)
else:
remote_branches = gitcmds.branch_list(remote=True)
remote_branch = 'origin/%s' % branch
if remote_branch in remote_branches:
return gitcmds.merge_base(branch, remote_branch)
elif 'origin/master' in remote_branches:
return gitcmds.merge_base(branch, 'origin/master')
else:
return 'HEAD'
else:
# Compare against the remote branch
return branch
def update_combo_boxes(self, left=False):
"""Update listwidgets from the combobox selection
Update either the left or right listwidgets
to reflect the available items.
"""
if left:
which = ustr(self.left_combo.currentText())
widget = self.left_list
else:
which = ustr(self.right_combo.currentText())
widget = self.right_list
if not which:
return
# If we're looking at "local" stuff then provide the
# sandbox as a valid choice. If we're looking at
# "remote" stuff then also include the branch point.
if which == self.LOCAL:
new_list = ([self.SANDBOX] + self.local_branches)
else:
new_list = ([self.BRANCH_POINT] + self.remote_branches)
widget.clear()
widget.addItems(new_list)
if new_list:
item = widget.item(0)
widget.setCurrentItem(item)
widget.setItemSelected(item, True)
def compare(self, *args):
"""Shows the diff for a specific file
"""
tree_widget = self.diff_files
item = tree_widget.currentItem()
if item and item.isSelected():
self.compare_file(item.path)
def compare_file(self, filename):
"""Initiates the difftool session"""
if self.use_sandbox:
left = self.diff_arg[0]
if len(self.diff_arg) > 1:
right = self.diff_arg[1]
else:
right = None
else:
left, right = self.start, self.end
difftool.launch(left=left, right=right, paths=[filename])
|
martinribelotta/micropython
|
refs/heads/master
|
tests/basics/fun2.py
|
119
|
# calling a function from a function
def f(x):
print(x + 1)
def g(x):
f(2 * x)
f(4 * x)
g(3)
|
jart/tensorflow
|
refs/heads/master
|
tensorflow/python/training/session_run_hook.py
|
25
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A SessionRunHook extends `session.run()` calls for the `MonitoredSession`.
SessionRunHooks are useful to track training, report progress, request early
stopping and more. SessionRunHooks use the observer pattern and notify at the
following points:
- when a session starts being used
- before a call to the `session.run()`
- after a call to the `session.run()`
- when the session closed
A SessionRunHook encapsulates a piece of reusable/composable computation that
can piggyback a call to `MonitoredSession.run()`. A hook can add any
ops or tensors/feeds to the run call, and when the run call finishes
successfully it gets the outputs it requested. Hooks are allowed to add ops to the graph in
`hook.begin()`. The graph is finalized after the `begin()` method is called.
There are a few pre-defined hooks:
- StopAtStepHook: Request stop based on global_step
- CheckpointSaverHook: saves checkpoint
- LoggingTensorHook: outputs one or more tensor values to log
- NanTensorHook: Request stop if given `Tensor` contains Nans.
- SummarySaverHook: saves summaries to a summary writer
For more specific needs, you can create custom hooks:
class ExampleHook(SessionRunHook):
def begin(self):
# You can add ops to the graph here.
print('Starting the session.')
self.your_tensor = ...
def after_create_session(self, session, coord):
# When this is called, the graph is finalized and
# ops can no longer be added to the graph.
print('Session created.')
def before_run(self, run_context):
print('Before calling session.run().')
return SessionRunArgs(self.your_tensor)
def after_run(self, run_context, run_values):
print('Done running one step. The value of my tensor: %s',
run_values.results)
if you-need-to-stop-loop:
run_context.request_stop()
def end(self, session):
print('Done with the session.')
To understand how hooks interact with calls to `MonitoredSession.run()`,
look at following code:
with MonitoredTrainingSession(hooks=your_hooks, ...) as sess:
while not sess.should_stop():
sess.run(your_fetches)
The above user code leads to the following execution:
call hooks.begin()
sess = tf.Session()
call hooks.after_create_session()
while not stop is requested:
call hooks.before_run()
try:
results = sess.run(merged_fetches, feed_dict=merged_feeds)
except (errors.OutOfRangeError, StopIteration):
break
call hooks.after_run()
call hooks.end()
sess.close()
Note that if sess.run() raises OutOfRangeError or StopIteration then
hooks.after_run() will not be called but hooks.end() will still be called.
If sess.run() raises any other exception then neither hooks.after_run() nor
hooks.end() will be called.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.util.tf_export import tf_export
@tf_export("train.SessionRunHook")
class SessionRunHook(object):
"""Hook to extend calls to MonitoredSession.run()."""
def begin(self):
"""Called once before using the session.
When called, the default graph is the one that will be launched in the
session. The hook can modify the graph by adding new operations to it.
After the `begin()` call the graph will be finalized and the other callbacks
cannot modify the graph anymore. A second call of `begin()` on the same
graph should not change the graph.
"""
pass
def after_create_session(self, session, coord): # pylint: disable=unused-argument
"""Called when new TensorFlow session is created.
This is called to signal the hooks that a new session has been created. This
has two essential differences from the situation in which `begin` is called:
* When this is called, the graph is finalized and ops can no longer be added
to the graph.
* This method will also be called as a result of recovering a wrapped
session, not only at the beginning of the overall session.
Args:
session: A TensorFlow Session that has been created.
coord: A Coordinator object which keeps track of all threads.
"""
pass
def before_run(self, run_context): # pylint: disable=unused-argument
"""Called before each call to run().
You can return from this call a `SessionRunArgs` object indicating ops or
tensors to add to the upcoming `run()` call. These ops/tensors will be run
together with the ops/tensors originally passed to the original run() call.
The run args you return can also contain feeds to be added to the run()
call.
The `run_context` argument is a `SessionRunContext` that provides
information about the upcoming `run()` call: the originally requested
op/tensors, the TensorFlow Session.
At this point the graph is finalized and you cannot add ops.
Args:
run_context: A `SessionRunContext` object.
Returns:
None or a `SessionRunArgs` object.
"""
return None
def after_run(self,
run_context, # pylint: disable=unused-argument
run_values): # pylint: disable=unused-argument
"""Called after each call to run().
The `run_values` argument contains results of requested ops/tensors by
`before_run()`.
The `run_context` argument is the same one sent to the `before_run` call.
`run_context.request_stop()` can be called to stop the iteration.
If `session.run()` raises any exceptions then `after_run()` is not called.
Args:
run_context: A `SessionRunContext` object.
run_values: A SessionRunValues object.
"""
pass
def end(self, session): # pylint: disable=unused-argument
"""Called at the end of session.
The `session` argument can be used in case the hook wants to run final ops,
such as saving a last checkpoint.
If `session.run()` raises exception other than OutOfRangeError or
StopIteration then `end()` is not called.
Note the difference between `end()` and `after_run()` behavior when
`session.run()` raises OutOfRangeError or StopIteration. In that case
`end()` is called but `after_run()` is not called.
Args:
session: A TensorFlow Session that will be soon closed.
"""
pass
@tf_export("train.SessionRunArgs")
class SessionRunArgs(
collections.namedtuple("SessionRunArgs",
["fetches", "feed_dict", "options"])):
"""Represents arguments to be added to a `Session.run()` call.
Args:
fetches: Exactly like the 'fetches' argument to Session.Run().
Can be a single tensor or op, a list of 'fetches' or a dictionary
of fetches. For example:
fetches = global_step_tensor
fetches = [train_op, summary_op, global_step_tensor]
fetches = {'step': global_step_tensor, 'summ': summary_op}
Note that this can recurse as expected:
fetches = {'step': global_step_tensor,
'ops': [train_op, check_nan_op]}
feed_dict: Exactly like the `feed_dict` argument to `Session.Run()`
options: Exactly like the `options` argument to `Session.run()`, i.e., a
config_pb2.RunOptions proto.
"""
def __new__(cls, fetches, feed_dict=None, options=None):
return super(SessionRunArgs, cls).__new__(cls, fetches, feed_dict, options)
@tf_export("train.SessionRunContext")
class SessionRunContext(object):
"""Provides information about the `session.run()` call being made.
Provides information about original request to `Session.Run()` function.
SessionRunHook objects can stop the loop by calling `request_stop()` of
`run_context`. In the future we may use this object to add more information
about run without changing the Hook API.
"""
def __init__(self, original_args, session):
"""Initializes SessionRunContext."""
self._original_args = original_args
self._session = session
self._stop_requested = False
@property
def original_args(self):
"""A `SessionRunArgs` object holding the original arguments of `run()`.
If user called `MonitoredSession.run(fetches=a, feed_dict=b)`, then this
field is equal to SessionRunArgs(a, b).
Returns:
A `SessionRunArgs` object
"""
return self._original_args
@property
def session(self):
"""A TensorFlow session object which will execute the `run`."""
return self._session
@property
def stop_requested(self):
"""Returns whether a stop is requested or not.
If true, `MonitoredSession` stops iterations.
Returns:
A `bool`
"""
return self._stop_requested
def request_stop(self):
"""Sets stop requested field.
Hooks can use this function to request stop of iterations.
`MonitoredSession` checks whether this is called or not.
"""
self._stop_requested = True
@tf_export("train.SessionRunValues")
class SessionRunValues(
collections.namedtuple("SessionRunValues",
["results", "options", "run_metadata"])):
"""Contains the results of `Session.run()`.
In the future we may use this object to add more information about result of
run without changing the Hook API.
Args:
results: The return values from `Session.run()` corresponding to the fetches
attribute returned in the RunArgs. Note that this has the same shape as
the RunArgs fetches. For example:
fetches = global_step_tensor
=> results = nparray(int)
fetches = [train_op, summary_op, global_step_tensor]
=> results = [None, nparray(string), nparray(int)]
fetches = {'step': global_step_tensor, 'summ': summary_op}
=> results = {'step': nparray(int), 'summ': nparray(string)}
options: `RunOptions` from the `Session.run()` call.
run_metadata: `RunMetadata` from the `Session.run()` call.
"""
|
stephane-martin/salt-debian-packaging
|
refs/heads/master
|
salt-2016.3.3/tests/unit/states/mysql_user_test.py
|
2
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import skipIf, TestCase
from salttesting.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.states import mysql_user
import salt
mysql_user.__salt__ = {}
mysql_user.__opts__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class MysqlUserTestCase(TestCase):
'''
Test cases for salt.states.mysql_user
'''
# 'present' function tests: 1
def test_present(self):
'''
Test to ensure that the named user is present with
the specified properties.
'''
name = 'frank'
password = "bob@cat"
ret = {'name': name,
'result': False,
'comment': '',
'changes': {}}
mock = MagicMock(side_effect=[True, False, True, False, False, True,
False, False, False, False, False, True])
mock_t = MagicMock(return_value=True)
mock_f = MagicMock(return_value=False)
mock_str = MagicMock(return_value='salt')
mock_none = MagicMock(return_value=None)
mock_sn = MagicMock(side_effect=[None, 'salt', None, None, None])
with patch.object(salt.utils, 'is_true', mock_f):
comt = ('Either password or password_hash must be specified,'
' unless allow_passwordless is True')
ret.update({'comment': comt})
self.assertDictEqual(mysql_user.present(name), ret)
with patch.dict(mysql_user.__salt__, {'mysql.user_exists': mock,
'mysql.user_chpass': mock_t}):
with patch.object(salt.utils, 'is_true', mock_t):
comt = ('User frank@localhost is already present'
' with passwordless login')
ret.update({'comment': comt, 'result': True})
self.assertDictEqual(mysql_user.present(name), ret)
with patch.object(mysql_user, '_get_mysql_error', mock_str):
ret.update({'comment': 'salt', 'result': False})
self.assertDictEqual(mysql_user.present(name), ret)
with patch.object(mysql_user, '_get_mysql_error', mock_str):
comt = ('User frank@localhost is already present'
' with the desired password')
ret.update({'comment': comt, 'result': True})
self.assertDictEqual(mysql_user.present(name,
password=password), ret)
with patch.object(mysql_user, '_get_mysql_error', mock_str):
ret.update({'comment': 'salt', 'result': False})
self.assertDictEqual(mysql_user.present(name,
password=password),
ret)
with patch.object(mysql_user, '_get_mysql_error', mock_none):
with patch.dict(mysql_user.__opts__, {'test': True}):
comt = ('Password for user frank@localhost'
' is set to be changed')
ret.update({'comment': comt, 'result': None})
self.assertDictEqual(mysql_user.present
(name, password=password), ret)
with patch.object(mysql_user, '_get_mysql_error', mock_sn):
with patch.dict(mysql_user.__opts__, {'test': False}):
ret.update({'comment': 'salt', 'result': False})
self.assertDictEqual(mysql_user.present
(name, password=password), ret)
with patch.dict(mysql_user.__opts__, {'test': True}):
comt = ('User frank@localhost is set to be added')
ret.update({'comment': comt, 'result': None})
self.assertDictEqual(mysql_user.present
(name, password=password), ret)
with patch.dict(mysql_user.__opts__, {'test': False}):
comt = ('Password for user frank@localhost'
' has been changed')
ret.update({'comment': comt, 'result': True,
'changes': {name: 'Updated'}})
self.assertDictEqual(mysql_user.present
(name, password=password), ret)
# 'absent' function tests: 1
def test_absent(self):
'''
Test to ensure that the named user is absent.
'''
name = 'frank_exampledb'
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}}
mock = MagicMock(side_effect=[True, True, True, False, False, False])
mock_t = MagicMock(side_effect=[True, False])
mock_str = MagicMock(return_value='salt')
mock_none = MagicMock(return_value=None)
with patch.dict(mysql_user.__salt__, {'mysql.user_exists': mock,
'mysql.user_remove': mock_t}):
with patch.dict(mysql_user.__opts__, {'test': True}):
comt = ('User frank_exampledb@localhost is set to be removed')
ret.update({'comment': comt, 'result': None})
self.assertDictEqual(mysql_user.absent(name), ret)
with patch.dict(mysql_user.__opts__, {'test': False}):
comt = ('User frank_exampledb@localhost has been removed')
ret.update({'comment': comt, 'result': True,
'changes': {'frank_exampledb': 'Absent'}})
self.assertDictEqual(mysql_user.absent(name), ret)
                with patch.object(mysql_user, '_get_mysql_error', mock_str):
                    ret.update({'comment': 'salt', 'result': False,
                                'changes': {}})
                    self.assertDictEqual(mysql_user.absent(name), ret)
                    ret.update({'comment': 'salt'})
                    self.assertDictEqual(mysql_user.absent(name), ret)
with patch.object(mysql_user, '_get_mysql_error', mock_none):
comt = ('User frank_exampledb@localhost is not present,'
' so it cannot be removed')
ret.update({'comment': comt, 'result': True,
'changes': {}})
self.assertDictEqual(mysql_user.absent(name), ret)
if __name__ == '__main__':
from integration import run_tests
run_tests(MysqlUserTestCase, needs_daemon=False)
|
Batterfii/django
|
refs/heads/master
|
tests/timezones/urls.py
|
406
|
from django.conf.urls import url
from . import admin as tz_admin # NOQA: register tz_admin
urlpatterns = [
url(r'^admin/', tz_admin.site.urls),
]
|
EndingCredits/PyGame-Learning-Environment
|
refs/heads/master
|
setup.py
|
3
|
import os
from setuptools import find_packages
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
install_requires = [
"numpy",
"Pillow"
]
setup(
name='ple',
version='0.0.1',
description='PyGame Learning Environment',
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.7",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
url='https://github.com/ntasfi/PyGame-Learning-Environment',
author='Norman Tasfi',
author_email='first letter of first name plus last at googles email service.',
keywords='',
license="MIT",
packages=find_packages(),
include_package_data=False,
zip_safe=False,
install_requires=install_requires
)
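# Sketch of a local source install using the metadata above (assumes pip is
# available on the PATH):
#   pip install -e .
# install_requires pulls in numpy and Pillow automatically.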
|
formalmethods/intrepyd
|
refs/heads/master
|
examples/counter/counter_exe.py
|
1
|
import intrepyd as ip
import intrepyd.components
import intrepyd.trace
if __name__ == "__main__":
ctx = ip.Context()
int8type = ctx.mk_int8_type()
ten = ctx.mk_number("10", int8type)
counter, Q = ip.components.mk_counter(ctx, "counter", type=int8type, limit=ten)
simulator = ctx.mk_simulator()
tr = ctx.mk_trace()
simulator.add_watch(counter)
simulator.add_watch(Q)
simulator.simulate(tr, 12)
df = tr.get_as_dataframe(ctx.net2name)
    print(df)
|
chand3040/cloud_that
|
refs/heads/named-release/cypress.rc
|
common/djangoapps/course_modes/views.py
|
18
|
"""
Views for the course_mode module
"""
import decimal
from ipware.ip import get_ip
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseBadRequest
from django.shortcuts import redirect
from django.views.generic.base import View
from django.utils.translation import ugettext as _
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from edxmako.shortcuts import render_to_response
from course_modes.models import CourseMode
from courseware.access import has_access
from student.models import CourseEnrollment
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.keys import CourseKey
from util.db import commit_on_success_with_read_committed
from xmodule.modulestore.django import modulestore
from embargo import api as embargo_api
class ChooseModeView(View):
"""View used when the user is asked to pick a mode.
When a get request is used, shows the selection page.
When a post request is used, assumes that it is a form submission
from the selection page, parses the response, and then sends user
to the next step in the flow.
"""
@method_decorator(login_required)
def get(self, request, course_id, error=None):
"""Displays the course mode choice page.
Args:
request (`Request`): The Django Request object.
course_id (unicode): The slash-separated course key.
Keyword Args:
error (unicode): If provided, display this error message
on the page.
Returns:
Response
"""
course_key = CourseKey.from_string(course_id)
# Check whether the user has access to this course
# based on country access rules.
embargo_redirect = embargo_api.redirect_if_blocked(
course_key,
user=request.user,
ip_address=get_ip(request),
url=request.path
)
if embargo_redirect:
return redirect(embargo_redirect)
enrollment_mode, is_active = CourseEnrollment.enrollment_mode_for_user(request.user, course_key)
modes = CourseMode.modes_for_course_dict(course_key)
        # We assume that, if 'professional' is one of the modes, it is the *only* mode.
        # If we offer more modes alongside 'professional' in the future, this will need
        # to route to the usual "choose your track" page. The same is true for the
        # no-id-professional mode.
has_enrolled_professional = (CourseMode.is_professional_slug(enrollment_mode) and is_active)
if CourseMode.has_professional_mode(modes) and not has_enrolled_professional:
return redirect(
reverse(
'verify_student_start_flow',
kwargs={'course_id': unicode(course_key)}
)
)
# If there isn't a verified mode available, then there's nothing
# to do on this page. The user has almost certainly been auto-registered
# in the "honor" track by this point, so we send the user
# to the dashboard.
if not CourseMode.has_verified_mode(modes):
return redirect(reverse('dashboard'))
# If a user has already paid, redirect them to the dashboard.
if is_active and (enrollment_mode in CourseMode.VERIFIED_MODES + [CourseMode.NO_ID_PROFESSIONAL_MODE]):
return redirect(reverse('dashboard'))
donation_for_course = request.session.get("donation_for_course", {})
chosen_price = donation_for_course.get(unicode(course_key), None)
course = modulestore().get_course(course_key)
# When a credit mode is available, students will be given the option
# to upgrade from a verified mode to a credit mode at the end of the course.
# This allows students who have completed photo verification to be eligible
        # for university credit.
# Since credit isn't one of the selectable options on the track selection page,
# we need to check *all* available course modes in order to determine whether
# a credit mode is available. If so, then we show slightly different messaging
# for the verified track.
has_credit_upsell = any(
CourseMode.is_credit_mode(mode) for mode
in CourseMode.modes_for_course(course_key, only_selectable=False)
)
context = {
"course_modes_choose_url": reverse("course_modes_choose", kwargs={'course_id': course_key.to_deprecated_string()}),
"modes": modes,
"has_credit_upsell": has_credit_upsell,
"course_name": course.display_name_with_default,
"course_org": course.display_org_with_default,
"course_num": course.display_number_with_default,
"chosen_price": chosen_price,
"error": error,
"responsive": True
}
if "verified" in modes:
context["suggested_prices"] = [
decimal.Decimal(x.strip())
for x in modes["verified"].suggested_prices.split(",")
if x.strip()
]
context["currency"] = modes["verified"].currency.upper()
context["min_price"] = modes["verified"].min_price
context["verified_name"] = modes["verified"].name
context["verified_description"] = modes["verified"].description
return render_to_response("course_modes/choose.html", context)
@method_decorator(login_required)
@method_decorator(commit_on_success_with_read_committed)
def post(self, request, course_id):
"""Takes the form submission from the page and parses it.
Args:
request (`Request`): The Django Request object.
course_id (unicode): The slash-separated course key.
Returns:
Status code 400 when the requested mode is unsupported. When the honor mode
is selected, redirects to the dashboard. When the verified mode is selected,
returns error messages if the indicated contribution amount is invalid or
below the minimum, otherwise redirects to the verification flow.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
user = request.user
# This is a bit redundant with logic in student.views.change_enrollment,
# but I don't really have the time to refactor it more nicely and test.
course = modulestore().get_course(course_key)
if not has_access(user, 'enroll', course):
error_msg = _("Enrollment is closed")
return self.get(request, course_id, error=error_msg)
requested_mode = self._get_requested_mode(request.POST)
allowed_modes = CourseMode.modes_for_course_dict(course_key)
if requested_mode not in allowed_modes:
return HttpResponseBadRequest(_("Enrollment mode not supported"))
if requested_mode == 'honor':
# The user will have already been enrolled in the honor mode at this
# point, so we just redirect them to the dashboard, thereby avoiding
# hitting the database a second time attempting to enroll them.
return redirect(reverse('dashboard'))
mode_info = allowed_modes[requested_mode]
if requested_mode == 'verified':
amount = request.POST.get("contribution") or \
request.POST.get("contribution-other-amt") or 0
try:
# Validate the amount passed in and force it into two digits
amount_value = decimal.Decimal(amount).quantize(decimal.Decimal('.01'), rounding=decimal.ROUND_DOWN)
except decimal.InvalidOperation:
error_msg = _("Invalid amount selected.")
return self.get(request, course_id, error=error_msg)
# Check for minimum pricing
if amount_value < mode_info.min_price:
error_msg = _("No selected price or selected price is too low.")
return self.get(request, course_id, error=error_msg)
donation_for_course = request.session.get("donation_for_course", {})
donation_for_course[unicode(course_key)] = amount_value
request.session["donation_for_course"] = donation_for_course
return redirect(
reverse(
'verify_student_start_flow',
kwargs={'course_id': unicode(course_key)}
)
)
def _get_requested_mode(self, request_dict):
"""Get the user's requested mode
Args:
request_dict (`QueryDict`): A dictionary-like object containing all given HTTP POST parameters.
Returns:
The course mode slug corresponding to the choice in the POST parameters,
None if the choice in the POST parameters is missing or is an unsupported mode.
"""
if 'verified_mode' in request_dict:
return 'verified'
if 'honor_mode' in request_dict:
return 'honor'
else:
return None
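        # Illustration (hypothetical POST payloads): {'verified_mode': 'x'}
        # yields 'verified', {'honor_mode': 'x'} yields 'honor', and anything
        # else (including an empty dict) yields None.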
def create_mode(request, course_id):
"""Add a mode to the course corresponding to the given course ID.
Only available when settings.FEATURES['MODE_CREATION_FOR_TESTING'] is True.
Attempts to use the following querystring parameters from the request:
`mode_slug` (str): The mode to add, either 'honor', 'verified', or 'professional'
`mode_display_name` (str): Describes the new course mode
`min_price` (int): The minimum price a user must pay to enroll in the new course mode
`suggested_prices` (str): Comma-separated prices to suggest to the user.
`currency` (str): The currency in which to list prices.
By default, this endpoint will create an 'honor' mode for the given course with display name
'Honor Code', a minimum price of 0, no suggested prices, and using USD as the currency.
Args:
request (`Request`): The Django Request object.
course_id (unicode): A course ID.
Returns:
Response
"""
PARAMETERS = {
'mode_slug': u'honor',
'mode_display_name': u'Honor Code Certificate',
'min_price': 0,
'suggested_prices': u'',
'currency': u'usd',
}
# Try pulling querystring parameters out of the request
for parameter, default in PARAMETERS.iteritems():
PARAMETERS[parameter] = request.GET.get(parameter, default)
# Attempt to create the new mode for the given course
course_key = CourseKey.from_string(course_id)
CourseMode.objects.get_or_create(course_id=course_key, **PARAMETERS)
# Return a success message and a 200 response
return HttpResponse("Mode '{mode_slug}' created for '{course}'.".format(
mode_slug=PARAMETERS['mode_slug'],
course=course_id
))
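# Illustrative request (hypothetical URL and course ID) exercising the
# querystring parameters documented above:
#   GET /create_mode/course-v1:edX+DemoX+Demo?mode_slug=verified&min_price=10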
|
yorung/fastbirdEngine
|
refs/heads/master
|
Code/Utility/export_layers_info.py
|
1
|
#!/usr/bin/env python
# This code is part of the open source game engine called fastbird-engine.
# Author : fastbird(jungwan82@naver.com)
# HOW TO USE
# 1. Put this file in the plug-in folder
# Maybe C:\Users\yourname\.gimp-2.8/plug-ins for Windows.
# Or you can check in the dialog which you can open from the menu
# Edit->Preferences->Folders->Plug-Ins
# 2. If you are running Gimp, Restart it.
# 3. Now you can find the new menu in Edit->Export Layers Info..
from gimpfu import *
import gimp
import xml.etree.ElementTree as ET
# Configuration
# Relative to the folder which running the game executable file game.exe
gXMLImagePath = "data/textures/gameui.dds"
gXMLPath = "D:\\projects\\FBGame1\\data\\textures\\gameui.xml"
gDDSPath = "D:\\projects\\FBGame1\\data\\textures\\gameui.dds"
def indent(elem, level=0):
i = "\n" + level*"\t"
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + "\t"
if not elem.tail or not elem.tail.strip():
elem.tail = i
for d in elem:
indent(d, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def write_layer(layer, elem, id):
subElem = ET.SubElement(elem, "region")
subElem.set("ID", "{}".format(id))
subElem.set("name", layer.name)
subElem.set("x", "{}".format(layer.offsets[0]))
subElem.set("y", "{}".format(layer.offsets[1]))
subElem.set("width", "{}".format(layer.width))
subElem.set("height", "{}".format(layer.height))
def export_layers_info(image, drawable, filepath, ddspath, ddspathInXml):
rootElem = ET.Element("TextureAtlas")
rootElem.set("file", ddspathInXml)
    id = 1
for layer in image.layers:
write_layer(layer, rootElem, id)
id += 1
indent(rootElem)
tree = ET.ElementTree(rootElem)
tree.write(filepath)
dplimg = image.duplicate()
dplimg.merge_visible_layers(1) # EXPAND-AS-NECESSARY (0), CLIP-TO-IMAGE (1), CLIP-TO-BOTTOM-LAYER (2)
#Compression format (0 = None, 1 = BC1/DXT1, 2 = BC2/DXT3, 3 = BC3/DXT5, 4 = BC3n/DXT5nm,
#5 = BC4/ATI1N, 6 = BC5/ATI2N, 7 = RXGB (DXT5), 8 = Alpha Exponent (DXT5), 9 = YCoCg (DXT5), 10 = YCoCg scaled (DXT5))
compression_format = 0
mipmaps = 0
savetype = 0
format = 0
transparent_index = -1
color_type=0
dither = 0
mipmap_filter=0
gamma_correct=0
gamma = 2.2
dplimg.active_layer.resize_to_image_size()
gimp.pdb.file_dds_save(dplimg, dplimg.active_layer, ddspath, ddspath, compression_format, mipmaps, savetype, format, transparent_index, color_type, dither, mipmap_filter, gamma_correct, gamma)
gimp.delete(dplimg)
register(
"python_fu_export_layers_info",
"Export layers information for fastbird-engine", "Id, name, position and size will be imported.",
"fastbird", "fastbird", "2014",
"Export Layers Info..",
"*",
[
(PF_IMAGE, "image", "Image", None),
(PF_DRAWABLE, "drawable", "Drawable", None),
(PF_FILE , "filepath", "File Path", gXMLPath),
(PF_FILE , "ddspath", "DDS File Path", gDDSPath),
(PF_STRING , "ddspathInXml", "DDS File Path In Xml", gXMLImagePath),
],
[],
export_layers_info, menu="<Image>/Tools")
main()
|
cgvarela/grpc
|
refs/heads/master
|
src/python/grpcio/grpc/framework/core/_end.py
|
1
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Implementation of base.End."""
import abc
import threading
import uuid
from grpc.framework.core import _operation
from grpc.framework.core import _utilities
from grpc.framework.foundation import callable_util
from grpc.framework.foundation import later
from grpc.framework.foundation import logging_pool
from grpc.framework.interfaces.base import base
from grpc.framework.interfaces.links import links
from grpc.framework.interfaces.links import utilities
_IDLE_ACTION_EXCEPTION_LOG_MESSAGE = 'Exception calling idle action!'
class End(base.End, links.Link):
"""A bridge between base.End and links.Link.
Implementations of this interface translate arriving tickets into
calls on application objects implementing base interfaces and
translate calls from application objects implementing base interfaces
into tickets sent to a joined link.
"""
__metaclass__ = abc.ABCMeta
class _Cycle(object):
"""State for a single start-stop End lifecycle."""
def __init__(self, pool):
self.pool = pool
self.grace = False
self.futures = []
self.operations = {}
self.idle_actions = []
def _abort(operations):
for operation in operations:
operation.abort(base.Outcome.LOCAL_SHUTDOWN)
def _cancel_futures(futures):
for future in futures:
future.cancel()
def _future_shutdown(lock, cycle, event):
def in_future():
with lock:
_abort(cycle.operations.values())
_cancel_futures(cycle.futures)
return in_future
def _termination_action(lock, stats, operation_id, cycle):
"""Constructs the termination action for a single operation.
Args:
lock: A lock to hold during the termination action.
    stats: A mapping from base.Outcome values to integers to increment with
the outcome given to the termination action.
operation_id: The operation ID for the termination action.
cycle: A _Cycle value to be updated during the termination action.
Returns:
A callable that takes an operation outcome as its sole parameter and that
should be used as the termination action for the operation associated
with the given operation ID.
"""
def termination_action(outcome):
with lock:
stats[outcome] += 1
cycle.operations.pop(operation_id, None)
if not cycle.operations:
for action in cycle.idle_actions:
cycle.pool.submit(action)
cycle.idle_actions = []
if cycle.grace:
_cancel_futures(cycle.futures)
cycle.pool.shutdown(wait=False)
return termination_action
class _End(End):
"""An End implementation."""
def __init__(self, servicer_package):
"""Constructor.
Args:
servicer_package: A _ServicerPackage for servicing operations or None if
this end will not be used to service operations.
"""
self._lock = threading.Condition()
self._servicer_package = servicer_package
self._stats = {outcome: 0 for outcome in base.Outcome}
self._mate = None
self._cycle = None
def start(self):
"""See base.End.start for specification."""
with self._lock:
if self._cycle is not None:
raise ValueError('Tried to start a not-stopped End!')
else:
self._cycle = _Cycle(logging_pool.pool(1))
def stop(self, grace):
"""See base.End.stop for specification."""
with self._lock:
if self._cycle is None:
event = threading.Event()
event.set()
return event
elif not self._cycle.operations:
event = threading.Event()
self._cycle.pool.submit(event.set)
self._cycle.pool.shutdown(wait=False)
self._cycle = None
return event
else:
self._cycle.grace = True
event = threading.Event()
self._cycle.idle_actions.append(event.set)
if 0 < grace:
future = later.later(
grace, _future_shutdown(self._lock, self._cycle, event))
self._cycle.futures.append(future)
else:
_abort(self._cycle.operations.values())
return event
def operate(
self, group, method, subscription, timeout, initial_metadata=None,
payload=None, completion=None):
"""See base.End.operate for specification."""
operation_id = uuid.uuid4()
with self._lock:
if self._cycle is None or self._cycle.grace:
raise ValueError('Can\'t operate on stopped or stopping End!')
termination_action = _termination_action(
self._lock, self._stats, operation_id, self._cycle)
operation = _operation.invocation_operate(
operation_id, group, method, subscription, timeout, initial_metadata,
payload, completion, self._mate.accept_ticket, termination_action,
self._cycle.pool)
self._cycle.operations[operation_id] = operation
return operation.context, operation.operator
def operation_stats(self):
"""See base.End.operation_stats for specification."""
with self._lock:
return dict(self._stats)
def add_idle_action(self, action):
"""See base.End.add_idle_action for specification."""
with self._lock:
if self._cycle is None:
raise ValueError('Can\'t add idle action to stopped End!')
action_with_exceptions_logged = callable_util.with_exceptions_logged(
action, _IDLE_ACTION_EXCEPTION_LOG_MESSAGE)
if self._cycle.operations:
self._cycle.idle_actions.append(action_with_exceptions_logged)
else:
self._cycle.pool.submit(action_with_exceptions_logged)
def accept_ticket(self, ticket):
"""See links.Link.accept_ticket for specification."""
with self._lock:
if self._cycle is not None:
operation = self._cycle.operations.get(ticket.operation_id)
if operation is not None:
operation.handle_ticket(ticket)
elif self._servicer_package is not None and not self._cycle.grace:
termination_action = _termination_action(
self._lock, self._stats, ticket.operation_id, self._cycle)
operation = _operation.service_operate(
self._servicer_package, ticket, self._mate.accept_ticket,
termination_action, self._cycle.pool)
if operation is not None:
self._cycle.operations[ticket.operation_id] = operation
def join_link(self, link):
"""See links.Link.join_link for specification."""
with self._lock:
self._mate = utilities.NULL_LINK if link is None else link
def serviceless_end_link():
"""Constructs an End usable only for invoking operations.
Returns:
An End usable for translating operations into ticket exchange.
"""
return _End(None)
def serviceful_end_link(servicer, default_timeout, maximum_timeout):
"""Constructs an End capable of servicing operations.
Args:
servicer: An interfaces.Servicer for servicing operations.
default_timeout: A length of time in seconds to be used as the default
time alloted for a single operation.
maximum_timeout: A length of time in seconds to be used as the maximum
time alloted for a single operation.
Returns:
An End capable of servicing the operations requested of it through ticket
exchange.
"""
return _End(
_utilities.ServicerPackage(servicer, default_timeout, maximum_timeout))
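# Rough usage sketch (hypothetical servicer object) wiring two ends together
# through the links.Link interface implemented above:
#   invocation_end = serviceless_end_link()
#   service_end = serviceful_end_link(my_servicer, 10.0, 60.0)
#   invocation_end.join_link(service_end)
#   service_end.join_link(invocation_end)
#   invocation_end.start()
#   service_end.start()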
|
benroeder/moviepy
|
refs/heads/master
|
moviepy/config.py
|
15
|
import os
import subprocess as sp
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, 'wb')
if os.name == 'nt':
try:
import winreg as wr # py3k
    except ImportError:
        import _winreg as wr # py2k
from .config_defaults import (FFMPEG_BINARY, IMAGEMAGICK_BINARY)
def try_cmd(cmd):
try:
popen_params = { "stdout": sp.PIPE,
"stderr": sp.PIPE,
"stdin": DEVNULL
}
# This was added so that no extra unwanted window opens on windows
# when the child process is created
if os.name == "nt":
popen_params["creationflags"] = 0x08000000
proc = sp.Popen(cmd, **popen_params)
proc.communicate()
except Exception as err:
return False, err
else:
return True, None
if FFMPEG_BINARY=='ffmpeg-imageio':
from imageio.plugins.ffmpeg import get_exe
FFMPEG_BINARY = get_exe()
elif FFMPEG_BINARY=='auto-detect':
if try_cmd(['ffmpeg'])[0]:
FFMPEG_BINARY = 'ffmpeg'
elif try_cmd(['ffmpeg.exe'])[0]:
FFMPEG_BINARY = 'ffmpeg.exe'
else:
FFMPEG_BINARY = 'unset'
else:
success, err = try_cmd([FFMPEG_BINARY])
if not success:
        raise IOError(str(err) +
                      " - The path specified for the ffmpeg binary might be wrong")
if IMAGEMAGICK_BINARY=='auto-detect':
if os.name == 'nt':
try:
key = wr.OpenKey(wr.HKEY_LOCAL_MACHINE, 'SOFTWARE\\ImageMagick\\Current')
IMAGEMAGICK_BINARY = wr.QueryValueEx(key, 'BinPath')[0] + r"\convert.exe"
key.Close()
        except Exception:
IMAGEMAGICK_BINARY = 'unset'
elif try_cmd(['convert'])[0]:
IMAGEMAGICK_BINARY = 'convert'
else:
IMAGEMAGICK_BINARY = 'unset'
else:
success, err = try_cmd([IMAGEMAGICK_BINARY])
if not success:
        raise IOError(str(err) +
                      " - The path specified for the ImageMagick binary might be wrong")
def get_setting(varname):
""" Returns the value of a configuration variable. """
gl = globals()
if varname not in gl.keys():
raise ValueError("Unknown setting %s"%varname)
    # Here, possibly add some code to raise exceptions if some
    # parameter isn't set properly, explaining how to set it.
return gl[varname]
def change_settings(new_settings={}, file=None):
""" Changes the value of configuration variables."""
gl = globals()
if file is not None:
execfile(file)
gl.update(locals())
gl.update(new_settings)
# Here you can add some code to check that the new configuration
# values are valid.
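# Usage sketch: reading and overriding configuration values at runtime (the
# binary path below is illustrative):
#   from moviepy.config import get_setting, change_settings
#   change_settings({"IMAGEMAGICK_BINARY": "/usr/local/bin/convert"})
#   print(get_setting("IMAGEMAGICK_BINARY"))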
if __name__ == "__main__":
if try_cmd([FFMPEG_BINARY])[0]:
print( "MoviePy : ffmpeg successfully found." )
else:
print( "MoviePy : can't find or access ffmpeg." )
if try_cmd([IMAGEMAGICK_BINARY])[0]:
print( "MoviePy : ImageMagick successfully found." )
else:
print( "MoviePy : can't find or access ImageMagick." )
|
SeanEstey/Bravo
|
refs/heads/stable
|
app/auth/manager.py
|
1
|
'''app.auth.manager'''
import base64
from bson.objectid import ObjectId
from flask import g, current_app
from flask_login import current_user, login_user, logout_user
from app import login_manager
from .user import User
from logging import getLogger
log = getLogger(__name__)
#-------------------------------------------------------------------------------
def login(username, pw):
db_user = User.authenticate(username, pw)
if not db_user:
log.debug('invalid login credentials for <%s>', username)
raise Exception("Login failed. Invalid username or password")
login_user(
User(
db_user['user'],
name = db_user['name'],
_id = db_user['_id'],
group = db_user['group'],
admin = db_user['admin']))
log.debug('%s logged in', username)
#-------------------------------------------------------------------------------
def logout():
log.debug('%s logged out', current_user.user_id)
rv = logout_user()
#-------------------------------------------------------------------------------
@login_manager.user_loader
def load_user(user_id):
db = current_app.db_client['bravo']
db_user = db.users.find_one({'user': user_id})
if not db_user:
log.debug('cant load user_id=%s', user_id)
return None
return User(
user_id,
name=db_user['name'],
_id=db_user['_id'],
group=db_user['group'],
admin=db_user['admin'])
#-------------------------------------------------------------------------------
@login_manager.request_loader
def load_api_user(request):
api_key = request.headers.get('Authorization')
if not api_key:
return None
api_key = api_key.replace('Basic ', '', 1)
try:
api_key = base64.b64decode(api_key)
    except TypeError as e:
log.debug('base64 decode error, desc=%s', str(e))
return None
api_user = api_key.split(':')[1]
db = current_app.db_client['bravo']
user = db.users.find_one({'api_key':str(api_key)})
if user:
#print 'loaded api_user %s, group %s' %(user['name'], user['group'])
return User(
user['user'],
name = user['name'],
_id = user['_id'],
group = user['group'],
admin = user['admin'])
else:
return None
|
smartscheduling/scikit-learn-categorical-tree
|
refs/heads/master
|
sklearn/utils/tests/test_shortest_path.py
|
42
|
from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
#set nonzero entries to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
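# floyd_warshall_slow above is the O(N^3) pure-Python reference implementation
# that the tests below use to validate the optimized graph_shortest_path
# routines ('FW' and 'D' methods).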
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix += dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
if __name__ == '__main__':
import nose
nose.runmodule()
|
p0cisk/Quantum-GIS
|
refs/heads/master
|
python/plugins/processing/modeler/AddModelFromFileAction.py
|
5
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
EditScriptAction.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'April 2014'
__copyright__ = '(C) 2014, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import shutil
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtWidgets import QFileDialog, QMessageBox
from qgis.PyQt.QtCore import QSettings, QFileInfo
from processing.gui.ToolboxAction import ToolboxAction
from processing.modeler.ModelerAlgorithm import ModelerAlgorithm
from processing.modeler.WrongModelException import WrongModelException
from processing.modeler.ModelerUtils import ModelerUtils
from processing.core.alglist import algList
pluginPath = os.path.split(os.path.dirname(__file__))[0]
class AddModelFromFileAction(ToolboxAction):
def __init__(self):
self.name, self.i18n_name = self.trAction('Add model from file')
self.group, self.i18n_group = self.trAction('Tools')
def getIcon(self):
return QIcon(os.path.join(pluginPath, 'images', 'model.png'))
def execute(self):
settings = QSettings()
lastDir = settings.value('Processing/lastModelsDir', '')
filename, selected_filter = QFileDialog.getOpenFileName(self.toolbox,
self.tr('Open model', 'AddModelFromFileAction'), lastDir,
self.tr('Processing model files (*.model *.MODEL)', 'AddModelFromFileAction'))
if filename:
try:
settings.setValue('Processing/lastModelsDir',
QFileInfo(filename).absoluteDir().absolutePath())
ModelerAlgorithm.fromFile(filename)
except WrongModelException:
QMessageBox.warning(
self.toolbox,
self.tr('Error reading model', 'AddModelFromFileAction'),
self.tr('The selected file does not contain a valid model', 'AddModelFromFileAction'))
return
            except Exception:
QMessageBox.warning(self.toolbox,
self.tr('Error reading model', 'AddModelFromFileAction'),
self.tr('Cannot read file', 'AddModelFromFileAction'))
return
destFilename = os.path.join(ModelerUtils.modelsFolders()[0], os.path.basename(filename))
shutil.copyfile(filename, destFilename)
algList.reloadProvider('model')
|
Rav3nPL/bitcoin
|
refs/heads/master
|
test/functional/wallet-hd.py
|
1
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Hierarchical Deterministic wallet function."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
)
import shutil
import os
class WalletHDTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[], ['-keypool=0']]
    def run_test(self):
tmpdir = self.options.tmpdir
# Make sure can't switch off usehd after wallet creation
self.stop_node(1)
self.assert_start_raises_init_error(1, ['-usehd=0'], 'already existing HD wallet')
self.start_node(1)
connect_nodes_bi(self.nodes, 0, 1)
# Make sure we use hd, keep masterkeyid
masterkeyid = self.nodes[1].getwalletinfo()['hdmasterkeyid']
assert_equal(len(masterkeyid), 40)
# create an internal key
change_addr = self.nodes[1].getrawchangeaddress()
        change_addrV = self.nodes[1].validateaddress(change_addr)
assert_equal(change_addrV["hdkeypath"], "m/0'/1'/0'") #first internal child key
# Import a non-HD private key in the HD wallet
non_hd_add = self.nodes[0].getnewaddress()
self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))
# This should be enough to keep the master key and the non-HD key
self.nodes[1].backupwallet(tmpdir + "/hd.bak")
#self.nodes[1].dumpwallet(tmpdir + "/hd.dump")
# Derive some HD addresses and remember the last
# Also send funds to each add
self.nodes[0].generate(101)
hd_add = None
num_hd_adds = 300
for i in range(num_hd_adds):
hd_add = self.nodes[1].getnewaddress()
hd_info = self.nodes[1].validateaddress(hd_add)
assert_equal(hd_info["hdkeypath"], "m/0'/0'/"+str(i)+"'")
assert_equal(hd_info["hdmasterkeyid"], masterkeyid)
self.nodes[0].sendtoaddress(hd_add, 1)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(non_hd_add, 1)
self.nodes[0].generate(1)
# create an internal key (again)
change_addr = self.nodes[1].getrawchangeaddress()
        change_addrV = self.nodes[1].validateaddress(change_addr)
assert_equal(change_addrV["hdkeypath"], "m/0'/1'/1'") #second internal child key
self.sync_all()
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
self.log.info("Restore backup ...")
self.stop_node(1)
# we need to delete the complete regtest directory
        # otherwise node1 would auto-recover all funds and flag the keypool keys as used
shutil.rmtree(os.path.join(tmpdir, "node1/regtest/blocks"))
shutil.rmtree(os.path.join(tmpdir, "node1/regtest/chainstate"))
shutil.copyfile(os.path.join(tmpdir, "hd.bak"), os.path.join(tmpdir, "node1/regtest/wallets/wallet.dat"))
self.start_node(1)
# Assert that derivation is deterministic
hd_add_2 = None
for _ in range(num_hd_adds):
hd_add_2 = self.nodes[1].getnewaddress()
hd_info_2 = self.nodes[1].validateaddress(hd_add_2)
assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/"+str(_)+"'")
assert_equal(hd_info_2["hdmasterkeyid"], masterkeyid)
assert_equal(hd_add, hd_add_2)
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
# Needs rescan
self.stop_node(1)
self.start_node(1, extra_args=self.extra_args[1] + ['-rescan'])
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
# Try a RPC based rescan
self.stop_node(1)
shutil.rmtree(os.path.join(tmpdir, "node1/regtest/blocks"))
shutil.rmtree(os.path.join(tmpdir, "node1/regtest/chainstate"))
shutil.copyfile(os.path.join(tmpdir, "hd.bak"), os.path.join(tmpdir, "node1/regtest/wallet.dat"))
self.start_node(1, extra_args=self.extra_args[1])
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
out = self.nodes[1].rescanblockchain(0, 1)
assert_equal(out['start_height'], 0)
assert_equal(out['stop_height'], 1)
out = self.nodes[1].rescanblockchain()
assert_equal(out['start_height'], 0)
assert_equal(out['stop_height'], self.nodes[1].getblockcount())
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
# send a tx and make sure its using the internal chain for the changeoutput
txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
outs = self.nodes[1].decoderawtransaction(self.nodes[1].gettransaction(txid)['hex'])['vout']
keypath = ""
for out in outs:
if out['value'] != 1:
keypath = self.nodes[1].validateaddress(out['scriptPubKey']['addresses'][0])['hdkeypath']
assert_equal(keypath[0:7], "m/0'/1'")
if __name__ == '__main__':
    WalletHDTest().main()
|
Tasignotas/topographica_mirror
|
refs/heads/master
|
topo/submodel/earlyvision.py
|
2
|
"""
Contains a variety of sensory models, specifically models for the
visual pathway.
"""
import topo
import param
import numbergen
import lancet
import numpy
import imagen
from imagen.patterncoordinator import PatternCoordinator, PatternCoordinatorImages
from topo.base.arrayutil import DivideWithConstant
from topo.submodel import Model, ArraySpec # pyflakes:ignore (API import)
from topo import sheet, transferfn
from collections import OrderedDict
class SensoryModel(Model):
dims = param.List(default=['xy'],class_=str,doc="""
Stimulus dimensions to include, out of the possible list:
:'xy': Position in x and y coordinates""")
num_inputs = param.Integer(default=2,bounds=(1,None),doc="""
How many input patterns to present per unit area at each
iteration, when using discrete patterns (e.g. Gaussians).""")
class VisualInputModel(SensoryModel):
allowed_dims = ['xy', 'or', 'od', 'dy', 'dr', 'sf']
period = param.Number(default=None, allow_None=True, doc="""
Simulation time between pattern updates on the generator
sheets. If None, the model is allowed to compute an appropriate
value for the period property (a period of 1.0 is typical)""")
dataset = param.ObjectSelector(default='Gaussian',objects=
['Gaussian','Nature','FoliageA','FoliageB'],doc="""
Set of input patterns to use::
:'Gaussian': Two-dimensional Gaussians
:'Nature': Shouval's 1999 monochrome 256x256 images
        :'FoliageA': McGill calibrated LMS foliage image subset (5)
        :'FoliageB': McGill calibrated LMS foliage image subset (25)""")
dims = param.List(default=['xy','or'],class_=str,doc="""
Stimulus dimensions to include, out of the possible list:
:'xy': Position in x and y coordinates
:'or': Orientation
:'od': Ocular dominance
:'dy': Disparity
:'dr': Direction of motion
:'sf': Spatial frequency
:'cr': Color (if available, see submodels.color)""")
area = param.Number(default=1.0,bounds=(0,None),
inclusive_bounds=(False,True),doc="""
Linear size of cortical area to simulate.
2.0 gives a 2.0x2.0 Sheet area in V1.""")
dim_fraction = param.Number(default=0.7,bounds=(0.0,1.0),doc="""
Fraction by which the input brightness varies between the two
eyes. Only used if 'od' in 'dims'.""")
contrast=param.Number(default=70, bounds=(0,100),doc="""
Brightness of the input patterns as a contrast (percent). Only
used if 'od' not in 'dims'.""")
sf_spacing=param.Number(default=2.0,bounds=(1,None),doc="""
Determines the factor by which successive SF channels increase
in size. Only used if 'sf' in 'dims'.""")
sf_channels=param.Integer(default=2,bounds=(1,None),softbounds=(1,4),doc="""
Number of spatial frequency channels. Only used if 'sf' in 'dims'.""")
max_disparity = param.Number(default=4.0,bounds=(0,None),doc="""
Maximum disparity between input pattern positions in the left
and right eye. Only used if 'dy' in 'dims'.""")
num_lags = param.Integer(default=4, bounds=(1,None),doc="""
Number of successive frames before showing a new input
pattern. This also determines the number of connections
between each individual LGN sheet and V1. Only used if 'dr' in
'dims'.""")
speed=param.Number(default=2.0/24.0,bounds=(0,None),
softbounds=(0,3.0/24.0),doc="""
Distance in sheet coordinates between successive frames, when
translating patterns. Only used if 'dr' in 'dims'.""")
align_orientations = param.Boolean(default=None,
allow_None=True, doc="""
Whether or not to align pattern orientations together if
composing multiple patterns together. If None,
align_orientations will be set to True when speed is non-zero
(and 'dr' in dims), otherwise it is set to False.""")
__abstract = True
def property_setup(self, properties):
disallowed_dims = [dim for dim in self.dims if dim not in self.allowed_dims]
if disallowed_dims:
raise Exception('%s not in the list of allowed dimensions'
% ','.join(repr(d) for d in disallowed_dims))
properties = super(VisualInputModel, self).property_setup(properties)
# The default period for most Topographica models is 1.0
properties['period'] = 1.0 if self.period is None else self.period
properties['binocular'] = 'od' in self.dims or 'dy' in self.dims
properties['SF']=range(1,self.sf_channels+1) if 'sf' in self.dims else [1]
properties['lags'] = range(self.num_lags) if 'dr' in self.dims else [0]
if 'dr' in self.dims and not numbergen.RandomDistribution.time_dependent:
numbergen.RandomDistribution.time_dependent = True
self.message('Setting time_dependent to True for motion model.')
return properties
def training_pattern_setup(self, **overrides):
# all the below will eventually end up in PatternCoordinator!
disparity_bound = 0.0
position_bound_x = self.area/2.0+0.25
position_bound_y = self.area/2.0+0.25
if 'dy' in self.dims:
disparity_bound = self.max_disparity*0.041665/2.0
#TFALERT: Formerly: position_bound_x = self.area/2.0+0.2
position_bound_x -= disparity_bound
align_orientations = (bool(self.speed) and ('dr' in self.dims)
if self.align_orientations is None
else self.align_orientations)
if 'dr' in self.dims:
position_bound_x+=self.speed*max(self['lags'])
position_bound_y+=self.speed*max(self['lags'])
pattern_labels=['LeftRetina','RightRetina'] if self['binocular'] else ['Retina']
# all the above will eventually end up in PatternCoordinator!
params = dict(features_to_vary=self.dims,
pattern_labels=pattern_labels,
pattern_parameters={'size': 0.088388 if 'or' in self.dims and self.dataset=='Gaussian' \
else 3*0.088388 if self.dataset=='Gaussian' else 10.0,
'aspect_ratio': 4.66667 if 'or' in self.dims else 1.0,
'scale': self.contrast/100.0},
disparity_bound=disparity_bound,
position_bound_x=position_bound_x,
position_bound_y=position_bound_y,
dim_fraction=self.dim_fraction,
reset_period=(max(self['lags'])*self['period'] + self['period']),
speed=self.speed,
align_orientations = align_orientations,
sf_spacing=self.sf_spacing,
sf_max_channel=max(self['SF']),
patterns_per_label=int(self.num_inputs*self.area*self.area))
if self.dataset=='Gaussian':
return PatternCoordinator(**dict(params, **overrides))()
else:
image_folder = 'images/shouval' if self.dataset=='Nature' \
else 'images/mcgill/foliage_a_combined' if self.dataset=='FoliageA' \
else 'images/mcgill/foliage_b_combined' if self.dataset=='FoliageB' \
else None
return PatternCoordinatorImages(image_folder, **dict(params, **overrides))()
@Model.definition
class EarlyVisionModel(VisualInputModel):
retina_density = param.Number(default=24.0,bounds=(0,None),
inclusive_bounds=(False,True),doc="""
The nominal_density to use for the retina.""")
lgn_density = param.Number(default=24.0,bounds=(0,None),
inclusive_bounds=(False,True),doc="""
The nominal_density to use for the LGN.""")
lgnaff_strength = param.Number(default=2.33, doc="""
Overall strength of the afferent projection from the retina to
the LGN sheets.""")
lgnaff_radius=param.Number(default=0.375,bounds=(0,None),doc="""
Connection field radius of a unit in the LGN level to units in
a retina sheet.""")
lgnlateral_radius=param.Number(default=0.5,bounds=(0,None),doc="""
Connection field radius of a unit in the LGN level to
surrounding units, in case gain control is used.""")
v1aff_radius=param.Number(default=0.27083,bounds=(0,None),doc="""
Connection field radius of a unit in V1 to units in a LGN
sheet.""")
center_size = param.Number(default=0.07385,bounds=(0,None),doc="""
The size of the central Gaussian used to compute the
center-surround receptive field.""")
surround_size = param.Number(default=4*0.07385,bounds=(0,None),doc="""
The size of the surround Gaussian used to compute the
center-surround receptive field.""")
gain_control_size = param.Number(default=0.25,bounds=(0,None),doc="""
The size of the divisive inhibitory suppressive field used for
contrast-gain control in the LGN sheets. This also acts as the
corresponding bounds radius.""")
gain_control = param.Boolean(default=True,doc="""
Whether to use divisive lateral inhibition in the LGN for
contrast gain control.""")
gain_control_SF = param.Boolean(default=True,doc="""
Whether to use divisive lateral inhibition in the LGN for
contrast gain control across Spatial Frequency Sheets.""")
def property_setup(self, properties):
properties = super(EarlyVisionModel, self).property_setup(properties)
sheet.SettlingCFSheet.joint_norm_fn = sheet.optimized.compute_joint_norm_totals_opt
center_polarities=['On','Off']
# Useful for setting up sheets
properties['polarities'] = lancet.List('polarity', center_polarities)
properties['eyes'] = (lancet.List('eye', ['Left','Right'])
if properties['binocular'] else lancet.Identity())
properties['SFs'] = (lancet.List('SF', properties['SF'])
if max(properties['SF'])>1 else lancet.Identity())
return properties
def sheet_setup(self):
sheets = OrderedDict()
sheets['Retina'] = self['eyes']
sheets['LGN'] = self['polarities'] * self['eyes'] * self['SFs']
return sheets
@Model.GeneratorSheet
def Retina(self, properties):
return Model.GeneratorSheet.params(
period=self['period'],
phase=0.05,
nominal_density=self.retina_density,
nominal_bounds=sheet.BoundingBox(radius=self.area/2.0
+ self.v1aff_radius*self.sf_spacing**(max(self['SF'])-1)
+ self.lgnaff_radius*self.sf_spacing**(max(self['SF'])-1)
+ self.lgnlateral_radius),
input_generator=self['training_patterns'][properties['eye']+'Retina'
if 'eye' in properties
else 'Retina'])
@Model.SettlingCFSheet
def LGN(self, properties):
channel=properties['SF'] if 'SF' in properties else 1
sf_aff_multiplier = self.sf_spacing**(max(self['SF'])-1) if self.gain_control_SF else \
self.sf_spacing**(channel-1)
gain_control = self.gain_control_SF if 'SF' in properties else self.gain_control
return Model.SettlingCFSheet.params(
mask = topo.base.projection.SheetMask(),
measure_maps=False,
output_fns=[transferfn.misc.HalfRectify()],
nominal_density=self.lgn_density,
nominal_bounds=sheet.BoundingBox(radius=self.area/2.0
+ self.v1aff_radius
* sf_aff_multiplier
+ self.lgnlateral_radius),
tsettle=2 if gain_control else 0,
strict_tsettle=1 if gain_control else 0)
@Model.matchconditions('LGN', 'afferent')
def afferent_conditions(self, properties):
return {'level': 'Retina', 'eye': properties.get('eye',None)}
@Model.SharedWeightCFProjection
def afferent(self, src_properties, dest_properties):
channel = dest_properties['SF'] if 'SF' in dest_properties else 1
centerg = imagen.Gaussian(size=self.center_size*self.sf_spacing**(channel-1),
aspect_ratio=1.0,
output_fns=[transferfn.DivisiveNormalizeL1()])
surroundg = imagen.Gaussian(size=self.surround_size*self.sf_spacing**(channel-1),
aspect_ratio=1.0,
output_fns=[transferfn.DivisiveNormalizeL1()])
on_weights = imagen.Composite(generators=[centerg,surroundg],operator=numpy.subtract)
off_weights = imagen.Composite(generators=[surroundg,centerg],operator=numpy.subtract)
return Model.SharedWeightCFProjection.params(
delay=0.05,
strength=self.lgnaff_strength,
name='Afferent',
nominal_bounds_template=sheet.BoundingBox(radius=self.lgnaff_radius
*self.sf_spacing**(channel-1)),
weights_generator=on_weights if dest_properties['polarity']=='On' else off_weights)
@Model.matchconditions('LGN', 'lateral_gain_control')
def lateral_gain_control_conditions(self, properties):
return ({'level': 'LGN', 'polarity':properties['polarity']}
if self.gain_control and self.gain_control_SF else
{'level': 'LGN', 'polarity':properties['polarity'],
'SF': properties.get('SF',None)}
if self.gain_control else None)
@Model.SharedWeightCFProjection
def lateral_gain_control(self, src_properties, dest_properties):
#TODO: Are those 0.25 the same as lgnlateral_radius/2.0?
name='LateralGC'
if 'eye' in src_properties:
name+=src_properties['eye']
if 'SF' in src_properties and self.gain_control_SF:
name+=('SF'+str(src_properties['SF']))
return Model.SharedWeightCFProjection.params(
delay=0.05,
dest_port=('Activity'),
activity_group=(0.6,DivideWithConstant(c=0.11)),
weights_generator=imagen.Gaussian(size=self.gain_control_size,
aspect_ratio=1.0,
output_fns=[transferfn.DivisiveNormalizeL1()]),
nominal_bounds_template=sheet.BoundingBox(radius=self.gain_control_size),
name=name,
strength=0.6/(2 if self['binocular'] else 1))
|
alunduil/etest
|
refs/heads/master
|
etest_test/fixtures_test/scripts_test/e2a2b40a96df4dd093dc7f1c94e67eda.py
|
1
|
"""Assign empty string."""
import textwrap
from etest_test.fixtures_test.scripts_test import SCRIPTS
_ = {
"uuid": "e2a2b40a96df4dd093dc7f1c94e67eda",
"description": "assign empty string",
"text": textwrap.dedent(
"""
IUSE=""
""",
),
"symbols": {
"IUSE": "",
},
"correct": None,
}
SCRIPTS.setdefault("all", []).append(_)
SCRIPTS.setdefault("bash", []).append(_)
|
tseaver/google-cloud-python
|
refs/heads/master
|
speech/samples/v1p1beta1/speech_contexts_classes_beta.py
|
2
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DO NOT EDIT! This is a generated sample ("Request", "speech_contexts_classes_beta")
# To install the latest published package dependency, execute the following:
# pip install google-cloud-speech
# sample-metadata
# title: Using Context Classes (Cloud Storage)
# description: Transcribe a short audio file with static context classes.
# usage: python3 samples/v1p1beta1/speech_contexts_classes_beta.py [--storage_uri "gs://cloud-samples-data/speech/time.mp3"] [--phrase "$TIME"]
# [START speech_contexts_classes_beta]
from google.cloud import speech_v1p1beta1
from google.cloud.speech_v1p1beta1 import enums
def sample_recognize(storage_uri, phrase):
"""
Transcribe a short audio file with static context classes.
Args:
storage_uri URI for audio file in Cloud Storage, e.g. gs://[BUCKET]/[FILE]
phrase Phrase "hints" help recognize the specified phrases from your audio.
In this sample we are using a static class phrase ($TIME).
Classes represent groups of words that represent common concepts
that occur in natural language.
"""
client = speech_v1p1beta1.SpeechClient()
# storage_uri = 'gs://cloud-samples-data/speech/time.mp3'
# phrase = '$TIME'
phrases = [phrase]
speech_contexts_element = {"phrases": phrases}
speech_contexts = [speech_contexts_element]
# The language of the supplied audio
language_code = "en-US"
# Sample rate in Hertz of the audio data sent
sample_rate_hertz = 24000
# Encoding of audio data sent. This sample sets this explicitly.
# This field is optional for FLAC and WAV audio formats.
encoding = enums.RecognitionConfig.AudioEncoding.MP3
config = {
"speech_contexts": speech_contexts,
"language_code": language_code,
"sample_rate_hertz": sample_rate_hertz,
"encoding": encoding,
}
audio = {"uri": storage_uri}
response = client.recognize(config, audio)
for result in response.results:
# First alternative is the most probable result
alternative = result.alternatives[0]
print(u"Transcript: {}".format(alternative.transcript))
# [END speech_contexts_classes_beta]
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--storage_uri", type=str, default="gs://cloud-samples-data/speech/time.mp3"
)
parser.add_argument("--phrase", type=str, default="$TIME")
args = parser.parse_args()
sample_recognize(args.storage_uri, args.phrase)
if __name__ == "__main__":
main()
|
robbiet480/home-assistant
|
refs/heads/dev
|
homeassistant/components/bbox/sensor.py
|
16
|
"""Support for Bbox Bouygues Modem Router."""
from datetime import timedelta
import logging
import pybbox
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_MONITORED_VARIABLES,
CONF_NAME,
DATA_RATE_MEGABITS_PER_SECOND,
DEVICE_CLASS_TIMESTAMP,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from homeassistant.util.dt import utcnow
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Powered by Bouygues Telecom"
DEFAULT_NAME = "Bbox"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
# Sensor types are defined like so: Name, unit, icon
SENSOR_TYPES = {
"down_max_bandwidth": [
"Maximum Download Bandwidth",
DATA_RATE_MEGABITS_PER_SECOND,
"mdi:download",
],
"up_max_bandwidth": [
"Maximum Upload Bandwidth",
DATA_RATE_MEGABITS_PER_SECOND,
"mdi:upload",
],
"current_down_bandwidth": [
"Currently Used Download Bandwidth",
DATA_RATE_MEGABITS_PER_SECOND,
"mdi:download",
],
"current_up_bandwidth": [
"Currently Used Upload Bandwidth",
DATA_RATE_MEGABITS_PER_SECOND,
"mdi:upload",
],
"uptime": ["Uptime", None, "mdi:clock"],
"number_of_reboots": ["Number of reboot", None, "mdi:restart"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MONITORED_VARIABLES): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Bbox sensor."""
# Create a data fetcher to support all of the configured sensors. Then make
# the first call to init the data.
try:
bbox_data = BboxData()
bbox_data.update()
except requests.exceptions.HTTPError as error:
_LOGGER.error(error)
return False
name = config[CONF_NAME]
sensors = []
for variable in config[CONF_MONITORED_VARIABLES]:
if variable == "uptime":
sensors.append(BboxUptimeSensor(bbox_data, variable, name))
else:
sensors.append(BboxSensor(bbox_data, variable, name))
add_entities(sensors, True)
class BboxUptimeSensor(Entity):
"""Bbox uptime sensor."""
def __init__(self, bbox_data, sensor_type, name):
"""Initialize the sensor."""
self.client_name = name
self.type = sensor_type
self._name = SENSOR_TYPES[sensor_type][0]
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self._icon = SENSOR_TYPES[sensor_type][2]
self.bbox_data = bbox_data
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return f"{self.client_name} {self._name}"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {ATTR_ATTRIBUTION: ATTRIBUTION}
@property
def device_class(self):
"""Return the class of this sensor."""
return DEVICE_CLASS_TIMESTAMP
def update(self):
"""Get the latest data from Bbox and update the state."""
self.bbox_data.update()
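        # The router reports uptime in seconds; expose the boot time
        # (now - uptime) so the timestamp device class can render it.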
uptime = utcnow() - timedelta(
seconds=self.bbox_data.router_infos["device"]["uptime"]
)
self._state = uptime.replace(microsecond=0).isoformat()
class BboxSensor(Entity):
"""Implementation of a Bbox sensor."""
def __init__(self, bbox_data, sensor_type, name):
"""Initialize the sensor."""
self.client_name = name
self.type = sensor_type
self._name = SENSOR_TYPES[sensor_type][0]
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self._icon = SENSOR_TYPES[sensor_type][2]
self.bbox_data = bbox_data
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return f"{self.client_name} {self._name}"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {ATTR_ATTRIBUTION: ATTRIBUTION}
def update(self):
"""Get the latest data from Bbox and update the state."""
self.bbox_data.update()
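        # The Bbox API reports bandwidth in kbit/s; convert to Mbit/s and
        # round to two decimals to match DATA_RATE_MEGABITS_PER_SECOND.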
if self.type == "down_max_bandwidth":
self._state = round(self.bbox_data.data["rx"]["maxBandwidth"] / 1000, 2)
elif self.type == "up_max_bandwidth":
self._state = round(self.bbox_data.data["tx"]["maxBandwidth"] / 1000, 2)
elif self.type == "current_down_bandwidth":
self._state = round(self.bbox_data.data["rx"]["bandwidth"] / 1000, 2)
elif self.type == "current_up_bandwidth":
self._state = round(self.bbox_data.data["tx"]["bandwidth"] / 1000, 2)
elif self.type == "number_of_reboots":
self._state = self.bbox_data.router_infos["device"]["numberofboots"]
class BboxData:
"""Get data from the Bbox."""
def __init__(self):
"""Initialize the data object."""
self.data = None
self.router_infos = None
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from the Bbox."""
try:
box = pybbox.Bbox()
self.data = box.get_ip_stats()
self.router_infos = box.get_bbox_info()
except requests.exceptions.HTTPError as error:
_LOGGER.error(error)
self.data = None
self.router_infos = None
return False
|
nzavagli/UnrealPy
|
refs/heads/master
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/zope.interface-4.1.2/src/zope/interface/tests/advisory_testing.py
|
57
|
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import sys
from zope.interface.advice import addClassAdvisor
from zope.interface.advice import getFrameInfo
my_globals = globals()
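# `ping` registers a class advisor: once the class body that calls it
# finishes executing, `pong` runs, records (value, klass) in the log, and
# replaces the class with a one-element list so the tests can observe it.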
def ping(log, value):
def pong(klass):
log.append((value,klass))
return [klass]
addClassAdvisor(pong)
try:
from types import ClassType
class ClassicClass:
__metaclass__ = ClassType
classLevelFrameInfo = getFrameInfo(sys._getframe())
except ImportError:
ClassicClass = None
class NewStyleClass:
__metaclass__ = type
classLevelFrameInfo = getFrameInfo(sys._getframe())
moduleLevelFrameInfo = getFrameInfo(sys._getframe())
|
leansoft/edx-platform
|
refs/heads/master
|
common/test/data/capa/prog1.py
|
270
|
# prog1
|
yanheven/glance
|
refs/heads/master
|
glance/search/plugins/metadefs_notification_handler.py
|
7
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from oslo_log import log as logging
import oslo_messaging
from glance.common import utils
from glance.search.plugins import base
LOG = logging.getLogger(__name__)
class MetadefHandler(base.NotificationBase):
def __init__(self, *args, **kwargs):
super(MetadefHandler, self).__init__(*args, **kwargs)
self.namespace_delete_keys = ['deleted_at', 'deleted', 'created_at',
'updated_at', 'namespace_old']
self.property_delete_keys = ['deleted', 'deleted_at',
'name_old', 'namespace', 'name']
def process(self, ctxt, publisher_id, event_type, payload, metadata):
try:
actions = {
"metadef_namespace.create": self.create_ns,
"metadef_namespace.update": self.update_ns,
"metadef_namespace.delete": self.delete_ns,
"metadef_object.create": self.create_obj,
"metadef_object.update": self.update_obj,
"metadef_object.delete": self.delete_obj,
"metadef_property.create": self.create_prop,
"metadef_property.update": self.update_prop,
"metadef_property.delete": self.delete_prop,
"metadef_resource_type.create": self.create_rs,
"metadef_resource_type.delete": self.delete_rs,
"metadef_tag.create": self.create_tag,
"metadef_tag.update": self.update_tag,
"metadef_tag.delete": self.delete_tag,
"metadef_namespace.delete_properties": self.delete_props,
"metadef_namespace.delete_objects": self.delete_objects,
"metadef_namespace.delete_tags": self.delete_tags
}
actions[event_type](payload)
return oslo_messaging.NotificationResult.HANDLED
except Exception as e:
LOG.error(utils.exception_to_str(e))
def run_create(self, id, payload):
self.engine.create(
index=self.index_name,
doc_type=self.document_type,
body=payload,
id=id
)
def run_update(self, id, payload, script=False):
if script:
self.engine.update(
index=self.index_name,
doc_type=self.document_type,
body=payload,
id=id)
else:
doc = {"doc": payload}
self.engine.update(
index=self.index_name,
doc_type=self.document_type,
body=doc,
id=id)
def run_delete(self, id):
self.engine.delete(
index=self.index_name,
doc_type=self.document_type,
id=id
)
def create_ns(self, payload):
id = payload['namespace']
self.run_create(id, self.format_namespace(payload))
def update_ns(self, payload):
id = payload['namespace_old']
self.run_update(id, self.format_namespace(payload))
def delete_ns(self, payload):
id = payload['namespace']
self.run_delete(id)
def create_obj(self, payload):
id = payload['namespace']
object = self.format_object(payload)
self.create_entity(id, "objects", object)
def update_obj(self, payload):
id = payload['namespace']
object = self.format_object(payload)
self.update_entity(id, "objects", object,
payload['name_old'], "name")
def delete_obj(self, payload):
id = payload['namespace']
self.delete_entity(id, "objects", payload['name'], "name")
def create_prop(self, payload):
id = payload['namespace']
property = self.format_property(payload)
self.create_entity(id, "properties", property)
def update_prop(self, payload):
id = payload['namespace']
property = self.format_property(payload)
self.update_entity(id, "properties", property,
payload['name_old'], "property")
def delete_prop(self, payload):
id = payload['namespace']
self.delete_entity(id, "properties", payload['name'], "property")
def create_rs(self, payload):
id = payload['namespace']
resource_type = dict()
resource_type['name'] = payload['name']
if payload['prefix']:
resource_type['prefix'] = payload['prefix']
if payload['properties_target']:
resource_type['properties_target'] = payload['properties_target']
self.create_entity(id, "resource_types", resource_type)
def delete_rs(self, payload):
id = payload['namespace']
self.delete_entity(id, "resource_types", payload['name'], "name")
def create_tag(self, payload):
id = payload['namespace']
tag = dict()
tag['name'] = payload['name']
self.create_entity(id, "tags", tag)
def update_tag(self, payload):
id = payload['namespace']
tag = dict()
tag['name'] = payload['name']
self.update_entity(id, "tags", tag, payload['name_old'], "name")
def delete_tag(self, payload):
id = payload['namespace']
self.delete_entity(id, "tags", payload['name'], "name")
def delete_props(self, payload):
self.delete_field(payload, "properties")
def delete_objects(self, payload):
self.delete_field(payload, "objects")
def delete_tags(self, payload):
self.delete_field(payload, "tags")
def create_entity(self, id, entity, entity_data):
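        # Scripted in-place update: append entity_item to the document's
        # nested list for this entity type, creating the list if absent.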
script = ("if (ctx._source.containsKey('%(entity)s'))"
"{ctx._source.%(entity)s += entity_item }"
"else {ctx._source.%(entity)s=entity_list};" %
{"entity": entity})
params = {
"entity_item": entity_data,
"entity_list": [entity_data]
}
payload = {"script": script, "params": params}
self.run_update(id, payload=payload, script=True)
def update_entity(self, id, entity, entity_data, entity_id, field_name):
entity_id = entity_id.lower()
script = ("obj=null; for(entity_item :ctx._source.%(entity)s)"
"{if(entity_item['%(field_name)s'].toLowerCase() "
" == entity_id ) obj=entity_item;};"
"if(obj!=null)ctx._source.%(entity)s.remove(obj);"
"if (ctx._source.containsKey('%(entity)s'))"
"{ctx._source.%(entity)s += entity_item; }"
"else {ctx._source.%(entity)s=entity_list;}" %
{"entity": entity, "field_name": field_name})
params = {
"entity_item": entity_data,
"entity_list": [entity_data],
"entity_id": entity_id
}
payload = {"script": script, "params": params}
self.run_update(id, payload=payload, script=True)
def delete_entity(self, id, entity, entity_id, field_name):
entity_id = entity_id.lower()
script = ("obj=null; for(entity_item :ctx._source.%(entity)s)"
"{if(entity_item['%(field_name)s'].toLowerCase() "
" == entity_id ) obj=entity_item;};"
"if(obj!=null)ctx._source.%(entity)s.remove(obj);" %
{"entity": entity, "field_name": field_name})
params = {
"entity_id": entity_id
}
payload = {"script": script, "params": params}
self.run_update(id, payload=payload, script=True)
def delete_field(self, payload, field):
id = payload['namespace']
script = ("if (ctx._source.containsKey('%(field)s'))"
"{ctx._source.remove('%(field)s')}") % {"field": field}
payload = {"script": script}
self.run_update(id, payload=payload, script=True)
def format_namespace(self, payload):
for key in self.namespace_delete_keys:
if key in payload.keys():
del payload[key]
return payload
def format_object(self, payload):
formatted_object = dict()
formatted_object['name'] = payload['name']
formatted_object['description'] = payload['description']
if payload['required']:
formatted_object['required'] = payload['required']
formatted_object['properties'] = []
for property in payload['properties']:
formatted_property = self.format_property(property)
formatted_object['properties'].append(formatted_property)
return formatted_object
def format_property(self, payload):
prop_data = dict()
prop_data['property'] = payload['name']
for key, value in six.iteritems(payload):
if key not in self.property_delete_keys and value:
prop_data[key] = value
return prop_data
|
midgardproject/midgard-core
|
refs/heads/master
|
tests/GIR/test_900_object_reference.py
|
1
|
# coding=utf-8
import os
import sys
import struct
import unittest
import inspect
from gi.repository import Midgard, GObject, GLib
from test_020_connection import TestConnection
class TestObjectReference(unittest.TestCase):
mgd = None
guid = None
name = "TestReference"
workspace = None
reference = None
def setUp(self):
if self.mgd is None:
self.mgd = TestConnection.openConnection()
self.guid = Midgard.Guid.new(self.mgd)
self.reference = Midgard.ObjectReference(id = self.guid, name = self.name)
def tearDown(self):
self.reference = None
self.mgd.close()
self.mgd = None
def testInheritance(self):
self.assertIsInstance(self.reference, GObject.Object)
self.assertIsInstance(self.reference, Midgard.ModelReference)
def testGetName(self):
self.assertNotEqual(self.reference.get_name(), None)
self.assertEqual(self.reference.get_name(), self.name)
def testGetID(self):
self.assertNotEqual(self.reference.get_id(), None)
self.assertEqual(self.reference.get_id(), self.guid)
def testGetIDValue(self):
self.assertNotEqual(self.reference.get_id_value(), None)
self.assertEqual(self.reference.get_id_value(), self.guid)
def testGetIDValueInteger(self):
reference = Midgard.ObjectReference(id = 123, name = "abc")
self.assertNotEqual(reference.get_id_value(), None)
self.assertEqual(reference.get_id_value(), 123)
def testGetWorkspace(self):
self.assertEqual(self.reference.get_workspace(), None)
if __name__ == "__main__":
unittest.main()
|
Salat-Cx65/python-for-android
|
refs/heads/master
|
python-modules/twisted/twisted/conch/insults/colors.py
|
146
|
"""
You don't really want to use this module. Try helper.py instead.
"""
CLEAR = 0
BOLD = 1
DIM = 2
ITALIC = 3
UNDERSCORE = 4
BLINK_SLOW = 5
BLINK_FAST = 6
REVERSE = 7
CONCEALED = 8
FG_BLACK = 30
FG_RED = 31
FG_GREEN = 32
FG_YELLOW = 33
FG_BLUE = 34
FG_MAGENTA = 35
FG_CYAN = 36
FG_WHITE = 37
BG_BLACK = 40
BG_RED = 41
BG_GREEN = 42
BG_YELLOW = 43
BG_BLUE = 44
BG_MAGENTA = 45
BG_CYAN = 46
BG_WHITE = 47
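# Illustrative only (not part of this module): these codes slot into an
# ANSI SGR escape sequence, e.g.
#   print '\x1b[%d;%dm%s\x1b[%dm' % (BOLD, FG_RED, 'alert', CLEAR)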
|
sudosurootdev/external_chromium_org
|
refs/heads/L5
|
remoting/webapp/build-webapp.py
|
42
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a directory with with the unpacked contents of the remoting webapp.
The directory will contain a copy-of or a link-to to all remoting webapp
resources. This includes HTML/JS and any plugin binaries. The script also
massages resulting files appropriately with host plugin data. Finally,
a zip archive for all of the above is produced.
"""
# Python 2.5 compatibility
from __future__ import with_statement
import io
import os
import platform
import re
import shutil
import subprocess
import sys
import time
import zipfile
# Update the module path, assuming that this script is in src/remoting/webapp,
# and that the google_api_keys module is in src/google_apis. Note that
# sys.path[0] refers to the directory containing this script.
if __name__ == '__main__':
sys.path.append(
os.path.abspath(os.path.join(sys.path[0], '../../google_apis')))
import google_api_keys
def findAndReplace(filepath, findString, replaceString):
"""Does a search and replace on the contents of a file."""
oldFilename = os.path.basename(filepath) + '.old'
oldFilepath = os.path.join(os.path.dirname(filepath), oldFilename)
os.rename(filepath, oldFilepath)
with open(oldFilepath) as input:
with open(filepath, 'w') as output:
for s in input:
output.write(s.replace(findString, replaceString))
os.remove(oldFilepath)
def createZip(zip_path, directory):
"""Creates a zipfile at zip_path for the given directory."""
zipfile_base = os.path.splitext(os.path.basename(zip_path))[0]
zip = zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED)
for (root, dirs, files) in os.walk(directory):
for f in files:
full_path = os.path.join(root, f)
rel_path = os.path.relpath(full_path, directory)
zip.write(full_path, os.path.join(zipfile_base, rel_path))
zip.close()
def replaceString(destination, placeholder, value):
findAndReplace(os.path.join(destination, 'plugin_settings.js'),
"'" + placeholder + "'", "'" + value + "'")
def processJinjaTemplate(input_file, output_file, context):
jinja2_path = os.path.normpath(
os.path.join(os.path.abspath(__file__),
'../../../third_party/jinja2'))
sys.path.append(os.path.split(jinja2_path)[0])
import jinja2
(template_path, template_name) = os.path.split(input_file)
env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_path))
template = env.get_template(template_name)
rendered = template.render(context)
io.open(output_file, 'w', encoding='utf-8').write(rendered)
def buildWebApp(buildtype, version, destination, zip_path,
manifest_template, webapp_type, files, locales):
"""Does the main work of building the webapp directory and zipfile.
Args:
    buildtype: the type of build ("Official" or "Dev").
    version: the app version string, written into the generated manifest.
    destination: A string with path to directory where the webapp will be
        written.
    zip_path: A string with path to the zipfile to create containing the
        contents of |destination|.
manifest_template: jinja2 template file for manifest.
webapp_type: webapp type ("v1", "v2" or "v2_pnacl").
files: An array of strings listing the paths for resources to include
in this webapp.
locales: An array of strings listing locales, which are copied, along
with their directory structure from the _locales directory down.
"""
# Ensure a fresh directory.
try:
shutil.rmtree(destination)
except OSError:
if os.path.exists(destination):
raise
else:
pass
os.mkdir(destination, 0775)
# Use symlinks on linux and mac for faster compile/edit cycle.
#
# On Windows Vista platform.system() can return 'Microsoft' with some
# versions of Python, see http://bugs.python.org/issue1082
# should_symlink = platform.system() not in ['Windows', 'Microsoft']
#
# TODO(ajwong): Pending decision on http://crbug.com/27185 we may not be
# able to load symlinked resources.
should_symlink = False
# Copy all the files.
for current_file in files:
destination_file = os.path.join(destination, os.path.basename(current_file))
destination_dir = os.path.dirname(destination_file)
if not os.path.exists(destination_dir):
os.makedirs(destination_dir, 0775)
if should_symlink:
# TODO(ajwong): Detect if we're vista or higher. Then use win32file
# to create a symlink in that case.
targetname = os.path.relpath(os.path.realpath(current_file),
os.path.realpath(destination_file))
os.symlink(targetname, destination_file)
else:
shutil.copy2(current_file, destination_file)
# Copy all the locales, preserving directory structure
destination_locales = os.path.join(destination, "_locales")
  os.mkdir(destination_locales, 0775)
  remoting_locales = os.path.join(destination, "remoting_locales")
  os.mkdir(remoting_locales, 0775)
for current_locale in locales:
extension = os.path.splitext(current_locale)[1]
if extension == '.json':
locale_id = os.path.split(os.path.split(current_locale)[0])[1]
destination_dir = os.path.join(destination_locales, locale_id)
destination_file = os.path.join(destination_dir,
os.path.split(current_locale)[1])
os.mkdir(destination_dir, 0775)
shutil.copy2(current_locale, destination_file)
elif extension == '.pak':
destination_file = os.path.join(remoting_locales,
os.path.split(current_locale)[1])
shutil.copy2(current_locale, destination_file)
else:
      raise Exception("Unknown extension: " + current_locale)
# Set client plugin type.
client_plugin = 'pnacl' if webapp_type == 'v2_pnacl' else 'native'
findAndReplace(os.path.join(destination, 'plugin_settings.js'),
"'CLIENT_PLUGIN_TYPE'", "'" + client_plugin + "'")
  # Allow host names for google services/apis to be overridden via env vars.
oauth2AccountsHost = os.environ.get(
'OAUTH2_ACCOUNTS_HOST', 'https://accounts.google.com')
oauth2ApiHost = os.environ.get(
'OAUTH2_API_HOST', 'https://www.googleapis.com')
directoryApiHost = os.environ.get(
'DIRECTORY_API_HOST', 'https://www.googleapis.com')
oauth2BaseUrl = oauth2AccountsHost + '/o/oauth2'
oauth2ApiBaseUrl = oauth2ApiHost + '/oauth2'
directoryApiBaseUrl = directoryApiHost + '/chromoting/v1'
replaceString(destination, 'OAUTH2_BASE_URL', oauth2BaseUrl)
replaceString(destination, 'OAUTH2_API_BASE_URL', oauth2ApiBaseUrl)
replaceString(destination, 'DIRECTORY_API_BASE_URL', directoryApiBaseUrl)
# Substitute hosts in the manifest's CSP list.
# Ensure we list the API host only once if it's the same for multiple APIs.
googleApiHosts = ' '.join(set([oauth2ApiHost, directoryApiHost]))
# WCS and the OAuth trampoline are both hosted on talkgadget. Split them into
# separate suffix/prefix variables to allow for wildcards in manifest.json.
talkGadgetHostSuffix = os.environ.get(
'TALK_GADGET_HOST_SUFFIX', 'talkgadget.google.com')
talkGadgetHostPrefix = os.environ.get(
'TALK_GADGET_HOST_PREFIX', 'https://chromoting-client.')
oauth2RedirectHostPrefix = os.environ.get(
'OAUTH2_REDIRECT_HOST_PREFIX', 'https://chromoting-oauth.')
# Use a wildcard in the manifest.json host specs if the prefixes differ.
talkGadgetHostJs = talkGadgetHostPrefix + talkGadgetHostSuffix
talkGadgetBaseUrl = talkGadgetHostJs + '/talkgadget/'
if talkGadgetHostPrefix == oauth2RedirectHostPrefix:
talkGadgetHostJson = talkGadgetHostJs
else:
talkGadgetHostJson = 'https://*.' + talkGadgetHostSuffix
# Set the correct OAuth2 redirect URL.
oauth2RedirectHostJs = oauth2RedirectHostPrefix + talkGadgetHostSuffix
oauth2RedirectHostJson = talkGadgetHostJson
oauth2RedirectPath = '/talkgadget/oauth/chrome-remote-desktop'
oauth2RedirectBaseUrlJs = oauth2RedirectHostJs + oauth2RedirectPath
oauth2RedirectBaseUrlJson = oauth2RedirectHostJson + oauth2RedirectPath
if buildtype == 'Official':
oauth2RedirectUrlJs = ("'" + oauth2RedirectBaseUrlJs +
"/rel/' + chrome.i18n.getMessage('@@extension_id')")
oauth2RedirectUrlJson = oauth2RedirectBaseUrlJson + '/rel/*'
else:
oauth2RedirectUrlJs = "'" + oauth2RedirectBaseUrlJs + "/dev'"
oauth2RedirectUrlJson = oauth2RedirectBaseUrlJson + '/dev*'
thirdPartyAuthUrlJs = oauth2RedirectBaseUrlJs + "/thirdpartyauth"
thirdPartyAuthUrlJson = oauth2RedirectBaseUrlJson + '/thirdpartyauth*'
replaceString(destination, "TALK_GADGET_URL", talkGadgetBaseUrl)
findAndReplace(os.path.join(destination, 'plugin_settings.js'),
"'OAUTH2_REDIRECT_URL'", oauth2RedirectUrlJs)
# Configure xmpp server and directory bot settings in the plugin.
xmppServerAddress = os.environ.get(
'XMPP_SERVER_ADDRESS', 'talk.google.com:5222')
xmppServerUseTls = os.environ.get('XMPP_SERVER_USE_TLS', 'true')
directoryBotJid = os.environ.get(
'DIRECTORY_BOT_JID', 'remoting@bot.talk.google.com')
findAndReplace(os.path.join(destination, 'plugin_settings.js'),
"Boolean('XMPP_SERVER_USE_TLS')", xmppServerUseTls)
replaceString(destination, "XMPP_SERVER_ADDRESS", xmppServerAddress)
replaceString(destination, "DIRECTORY_BOT_JID", directoryBotJid)
replaceString(destination, "THIRD_PARTY_AUTH_REDIRECT_URL",
thirdPartyAuthUrlJs)
# Set the correct API keys.
# For overriding the client ID/secret via env vars, see google_api_keys.py.
apiClientId = google_api_keys.GetClientID('REMOTING')
apiClientSecret = google_api_keys.GetClientSecret('REMOTING')
apiClientIdV2 = google_api_keys.GetClientID('REMOTING_IDENTITY_API')
replaceString(destination, "API_CLIENT_ID", apiClientId)
replaceString(destination, "API_CLIENT_SECRET", apiClientSecret)
# Use a consistent extension id for unofficial builds.
if buildtype != 'Official':
manifestKey = '"key": "remotingdevbuild",'
else:
manifestKey = ''
# Generate manifest.
context = {
'webapp_type': webapp_type,
'FULL_APP_VERSION': version,
'MANIFEST_KEY_FOR_UNOFFICIAL_BUILD': manifestKey,
'OAUTH2_REDIRECT_URL': oauth2RedirectUrlJson,
'TALK_GADGET_HOST': talkGadgetHostJson,
'THIRD_PARTY_AUTH_REDIRECT_URL': thirdPartyAuthUrlJson,
'REMOTING_IDENTITY_API_CLIENT_ID': apiClientIdV2,
'OAUTH2_BASE_URL': oauth2BaseUrl,
'OAUTH2_API_BASE_URL': oauth2ApiBaseUrl,
'DIRECTORY_API_BASE_URL': directoryApiBaseUrl,
'OAUTH2_ACCOUNTS_HOST': oauth2AccountsHost,
'GOOGLE_API_HOSTS': googleApiHosts,
}
processJinjaTemplate(manifest_template,
os.path.join(destination, 'manifest.json'),
context)
# Make the zipfile.
createZip(zip_path, destination)
return 0
def main():
if len(sys.argv) < 6:
print ('Usage: build-webapp.py '
'<build-type> <version> <dst> <zip-path> <manifest_template> '
'<webapp_type> <other files...> '
'[--locales <locales...>]')
return 1
arg_type = ''
files = []
locales = []
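  # argv[1:7] are the six fixed positional arguments consumed below;
  # everything after is a resource file until --locales switches the
  # remaining arguments to locale files.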
for arg in sys.argv[7:]:
if arg in ['--locales']:
arg_type = arg
elif arg_type == '--locales':
locales.append(arg)
else:
files.append(arg)
return buildWebApp(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4],
sys.argv[5], sys.argv[6], files, locales)
if __name__ == '__main__':
sys.exit(main())
|
shakamunyi/ansible
|
refs/heads/devel
|
lib/ansible/playbook/role/definition.py
|
10
|
# (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six import iteritems, string_types
import os
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.become import Become
from ansible.playbook.conditional import Conditional
from ansible.playbook.taggable import Taggable
from ansible.utils.path import unfrackpath
__all__ = ['RoleDefinition']
class RoleDefinition(Base, Become, Conditional, Taggable):
_role = FieldAttribute(isa='string')
def __init__(self, role_basedir=None):
self._role_path = None
self._role_basedir = role_basedir
self._role_params = dict()
super(RoleDefinition, self).__init__()
#def __repr__(self):
# return 'ROLEDEF: ' + self._attributes.get('role', '<no name set>')
@staticmethod
def load(data, variable_manager=None, loader=None):
raise AnsibleError("not implemented")
def preprocess_data(self, ds):
# role names that are simply numbers can be parsed by PyYAML
# as integers even when quoted, so turn it into a string type
if isinstance(ds, int):
ds = "%s" % ds
assert isinstance(ds, dict) or isinstance(ds, string_types) or isinstance(ds, AnsibleBaseYAMLObject)
if isinstance(ds, dict):
ds = super(RoleDefinition, self).preprocess_data(ds)
# we create a new data structure here, using the same
# object used internally by the YAML parsing code so we
# can preserve file:line:column information if it exists
new_ds = AnsibleMapping()
if isinstance(ds, AnsibleBaseYAMLObject):
new_ds.ansible_pos = ds.ansible_pos
# first we pull the role name out of the data structure,
# and then use that to determine the role path (which may
# result in a new role name, if it was a file path)
role_name = self._load_role_name(ds)
(role_name, role_path) = self._load_role_path(role_name)
# next, we split the role params out from the valid role
# attributes and update the new datastructure with that
# result and the role name
if isinstance(ds, dict):
(new_role_def, role_params) = self._split_role_params(ds)
new_ds.update(new_role_def)
self._role_params = role_params
# set the role name in the new ds
new_ds['role'] = role_name
# we store the role path internally
self._role_path = role_path
# save the original ds for use later
self._ds = ds
# and return the cleaned-up data structure
return new_ds
def _load_role_name(self, ds):
'''
Returns the role name (either the role: or name: field) from
the role definition, or (when the role definition is a simple
string), just that string
'''
if isinstance(ds, string_types):
return ds
role_name = ds.get('role', ds.get('name'))
if not role_name or not isinstance(role_name, string_types):
raise AnsibleError('role definitions must contain a role name', obj=ds)
return role_name
def _load_role_path(self, role_name):
'''
the 'role', as specified in the ds (or as a bare string), can either
be a simple name or a full path. If it is a full path, we use the
basename as the role name, otherwise we take the name as-given and
append it to the default role path
'''
role_path = unfrackpath(role_name)
if self._loader.path_exists(role_path):
role_name = os.path.basename(role_name)
return (role_name, role_path)
else:
# we always start the search for roles in the base directory of the playbook
role_search_paths = [
os.path.join(self._loader.get_basedir(), u'roles'),
u'./roles',
self._loader.get_basedir(),
u'./'
]
# also search in the configured roles path
if C.DEFAULT_ROLES_PATH:
configured_paths = C.DEFAULT_ROLES_PATH.split(os.pathsep)
role_search_paths.extend(configured_paths)
# finally, append the roles basedir, if it was set, so we can
# search relative to that directory for dependent roles
if self._role_basedir:
role_search_paths.append(self._role_basedir)
# now iterate through the possible paths and return the first one we find
for path in role_search_paths:
role_path = unfrackpath(os.path.join(path, role_name))
if self._loader.path_exists(role_path):
return (role_name, role_path)
# FIXME: make the parser smart about list/string entries in
# the yaml so the error line/file can be reported here
raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(role_search_paths)))
def _split_role_params(self, ds):
'''
Splits any random role params off from the role spec and store
them in a dictionary of params for parsing later
'''
role_def = dict()
role_params = dict()
for (key, value) in iteritems(ds):
# use the list of FieldAttribute values to determine what is and is not
# an extra parameter for this role (or sub-class of this role)
            if key not in [attr_name for (attr_name, attr_value) in iteritems(self._get_base_attributes())]:
# this key does not match a field attribute, so it must be a role param
role_params[key] = value
else:
# this is a field attribute, so copy it over directly
role_def[key] = value
return (role_def, role_params)
def get_role_params(self):
return self._role_params.copy()
def get_role_path(self):
return self._role_path
|
gandarez/wakatime
|
refs/heads/master
|
wakatime/packages/pygments_py2/pygments/styles/paraiso_dark.py
|
126
|
# -*- coding: utf-8 -*-
"""
pygments.styles.paraiso_dark
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Paraíso (Dark) by Jan T. Sott
Pygments template by Jan T. Sott (https://github.com/idleberg)
Created with Base16 Builder by Chris Kempson
(https://github.com/chriskempson/base16-builder).
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, Text, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
BACKGROUND = "#2f1e2e"
CURRENT_LINE = "#41323f"
SELECTION = "#4f424c"
FOREGROUND = "#e7e9db"
COMMENT = "#776e71"
RED = "#ef6155"
ORANGE = "#f99b15"
YELLOW = "#fec418"
GREEN = "#48b685"
AQUA = "#5bc4bf"
BLUE = "#06b6ef"
PURPLE = "#815ba4"
class ParaisoDarkStyle(Style):
default_style = ''
background_color = BACKGROUND
highlight_color = SELECTION
styles = {
# No corresponding class for the following:
Text: FOREGROUND, # class: ''
Whitespace: "", # class: 'w'
Error: RED, # class: 'err'
Other: "", # class 'x'
Comment: COMMENT, # class: 'c'
Comment.Multiline: "", # class: 'cm'
Comment.Preproc: "", # class: 'cp'
Comment.Single: "", # class: 'c1'
Comment.Special: "", # class: 'cs'
Keyword: PURPLE, # class: 'k'
Keyword.Constant: "", # class: 'kc'
Keyword.Declaration: "", # class: 'kd'
Keyword.Namespace: AQUA, # class: 'kn'
Keyword.Pseudo: "", # class: 'kp'
Keyword.Reserved: "", # class: 'kr'
Keyword.Type: YELLOW, # class: 'kt'
Operator: AQUA, # class: 'o'
Operator.Word: "", # class: 'ow' - like keywords
Punctuation: FOREGROUND, # class: 'p'
Name: FOREGROUND, # class: 'n'
Name.Attribute: BLUE, # class: 'na' - to be revised
Name.Builtin: "", # class: 'nb'
Name.Builtin.Pseudo: "", # class: 'bp'
Name.Class: YELLOW, # class: 'nc' - to be revised
Name.Constant: RED, # class: 'no' - to be revised
Name.Decorator: AQUA, # class: 'nd' - to be revised
Name.Entity: "", # class: 'ni'
Name.Exception: RED, # class: 'ne'
Name.Function: BLUE, # class: 'nf'
Name.Property: "", # class: 'py'
Name.Label: "", # class: 'nl'
Name.Namespace: YELLOW, # class: 'nn' - to be revised
Name.Other: BLUE, # class: 'nx'
Name.Tag: AQUA, # class: 'nt' - like a keyword
Name.Variable: RED, # class: 'nv' - to be revised
Name.Variable.Class: "", # class: 'vc' - to be revised
Name.Variable.Global: "", # class: 'vg' - to be revised
Name.Variable.Instance: "", # class: 'vi' - to be revised
Number: ORANGE, # class: 'm'
Number.Float: "", # class: 'mf'
Number.Hex: "", # class: 'mh'
Number.Integer: "", # class: 'mi'
Number.Integer.Long: "", # class: 'il'
Number.Oct: "", # class: 'mo'
Literal: ORANGE, # class: 'l'
Literal.Date: GREEN, # class: 'ld'
String: GREEN, # class: 's'
String.Backtick: "", # class: 'sb'
String.Char: FOREGROUND, # class: 'sc'
String.Doc: COMMENT, # class: 'sd' - like a comment
String.Double: "", # class: 's2'
String.Escape: ORANGE, # class: 'se'
String.Heredoc: "", # class: 'sh'
String.Interpol: ORANGE, # class: 'si'
String.Other: "", # class: 'sx'
String.Regex: "", # class: 'sr'
String.Single: "", # class: 's1'
String.Symbol: "", # class: 'ss'
Generic: "", # class: 'g'
Generic.Deleted: RED, # class: 'gd',
Generic.Emph: "italic", # class: 'ge'
Generic.Error: "", # class: 'gr'
Generic.Heading: "bold " + FOREGROUND, # class: 'gh'
Generic.Inserted: GREEN, # class: 'gi'
Generic.Output: "", # class: 'go'
Generic.Prompt: "bold " + COMMENT, # class: 'gp'
Generic.Strong: "bold", # class: 'gs'
Generic.Subheading: "bold " + AQUA, # class: 'gu'
Generic.Traceback: "", # class: 'gt'
}
|
strk/mapnik
|
refs/heads/master
|
scons/scons-local-2.2.0/SCons/Variables/__init__.py
|
14
|
"""engine.SCons.Variables
This file defines the Variables class that is used to add user-friendly
customizable variables to an SCons build.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Variables/__init__.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import os.path
import sys
import SCons.Environment
import SCons.Errors
import SCons.Util
import SCons.Warnings
from BoolVariable import BoolVariable # okay
from EnumVariable import EnumVariable # okay
from ListVariable import ListVariable # naja
from PackageVariable import PackageVariable # naja
from PathVariable import PathVariable # okay
class Variables(object):
    """
    Holds all the options, updates the environment with the variables,
    and renders the help text.
    """
    instance = None
def __init__(self, files=[], args={}, is_global=1):
"""
files - [optional] List of option configuration files to load
(backward compatibility) If a single string is passed it is
automatically placed in a file list
"""
self.options = []
self.args = args
if not SCons.Util.is_List(files):
if files:
files = [ files ]
else:
files = []
self.files = files
self.unknown = {}
        # register the first globally-constructed instance as the singleton
        if is_global and Variables.instance is None:
            Variables.instance = self
def _do_add(self, key, help="", default=None, validator=None, converter=None):
class Variable(object):
pass
option = Variable()
# if we get a list or a tuple, we take the first element as the
# option key and store the remaining in aliases.
if SCons.Util.is_List(key) or SCons.Util.is_Tuple(key):
option.key = key[0]
option.aliases = key[1:]
else:
option.key = key
option.aliases = [ key ]
option.help = help
option.default = default
option.validator = validator
option.converter = converter
self.options.append(option)
# options might be added after the 'unknown' dict has been set up,
# so we remove the key and all its aliases from that dict
for alias in list(option.aliases) + [ option.key ]:
if alias in self.unknown:
del self.unknown[alias]
def keys(self):
"""
Returns the keywords for the options
"""
return [o.key for o in self.options]
def Add(self, key, help="", default=None, validator=None, converter=None, **kw):
"""
Add an option.
key - the name of the variable, or a list or tuple of arguments
help - optional help text for the options
default - optional default value
validator - optional function that is called to validate the option's value
Called with (key, value, environment)
converter - optional function that is called to convert the option's value before
putting it in the environment.
"""
if SCons.Util.is_List(key) or isinstance(key, tuple):
self._do_add(*key)
return
if not SCons.Util.is_String(key) or \
not SCons.Environment.is_valid_construction_var(key):
raise SCons.Errors.UserError("Illegal Variables.Add() key `%s'" % str(key))
self._do_add(key, help, default, validator, converter)
def AddVariables(self, *optlist):
"""
Add a list of options.
Each list element is a tuple/list of arguments to be passed on
to the underlying method for adding options.
Example:
opt.AddVariables(
('debug', '', 0),
('CC', 'The C compiler'),
('VALIDATE', 'An option for testing validation', 'notset',
validator, None),
)
"""
for o in optlist:
self._do_add(*o)
def Update(self, env, args=None):
"""
Update an environment with the option variables.
env - the environment to update.
"""
values = {}
# first set the defaults:
for option in self.options:
            if option.default is not None:
values[option.key] = option.default
# next set the value specified in the options file
for filename in self.files:
if os.path.exists(filename):
dir = os.path.split(os.path.abspath(filename))[0]
if dir:
sys.path.insert(0, dir)
try:
values['__name__'] = filename
exec open(filename, 'rU').read() in {}, values
finally:
if dir:
del sys.path[0]
del values['__name__']
# set the values specified on the command line
if args is None:
args = self.args
for arg, value in args.items():
added = False
for option in self.options:
if arg in list(option.aliases) + [ option.key ]:
values[option.key] = value
added = True
if not added:
self.unknown[arg] = value
# put the variables in the environment:
# (don't copy over variables that are not declared as options)
for option in self.options:
try:
env[option.key] = values[option.key]
except KeyError:
pass
# Call the convert functions:
for option in self.options:
if option.converter and option.key in values:
value = env.subst('${%s}'%option.key)
try:
try:
env[option.key] = option.converter(value)
except TypeError:
env[option.key] = option.converter(value, env)
except ValueError, x:
raise SCons.Errors.UserError('Error converting option: %s\n%s'%(option.key, x))
# Finally validate the values:
for option in self.options:
if option.validator and option.key in values:
option.validator(option.key, env.subst('${%s}'%option.key), env)
def UnknownVariables(self):
"""
Returns any options in the specified arguments lists that
were not known, declared options in this object.
"""
return self.unknown
def Save(self, filename, env):
"""
Saves all the options in the given file. This file can
then be used to load the options next run. This can be used
to create an option cache file.
filename - Name of the file to save into
env - the environment get the option values from
"""
# Create the file and write out the header
try:
fh = open(filename, 'w')
try:
# Make an assignment in the file for each option
# within the environment that was assigned a value
# other than the default.
for option in self.options:
try:
value = env[option.key]
try:
prepare = value.prepare_to_store
except AttributeError:
try:
eval(repr(value))
except KeyboardInterrupt:
raise
except:
# Convert stuff that has a repr() that
# cannot be evaluated into a string
value = SCons.Util.to_String(value)
else:
value = prepare()
defaultVal = env.subst(SCons.Util.to_String(option.default))
if option.converter:
defaultVal = option.converter(defaultVal)
if str(env.subst('${%s}' % option.key)) != str(defaultVal):
fh.write('%s = %s\n' % (option.key, repr(value)))
except KeyError:
pass
finally:
fh.close()
except IOError, x:
raise SCons.Errors.UserError('Error writing options to file: %s\n%s' % (filename, x))
def GenerateHelpText(self, env, sort=None):
"""
Generate the help text for the options.
env - an environment that is used to get the current values
of the options.
"""
if sort:
options = sorted(self.options, key=lambda x: x.key)
else:
options = self.options
def format(opt, self=self, env=env):
if opt.key in env:
actual = env.subst('${%s}' % opt.key)
else:
actual = None
return self.FormatVariableHelpText(env, opt.key, opt.help, opt.default, actual, opt.aliases)
lines = [_f for _f in map(format, options) if _f]
return ''.join(lines)
format = '\n%s: %s\n default: %s\n actual: %s\n'
format_ = '\n%s: %s\n default: %s\n actual: %s\n aliases: %s\n'
def FormatVariableHelpText(self, env, key, help, default, actual, aliases=[]):
# Don't display the key name itself as an alias.
aliases = [a for a in aliases if a != key]
if len(aliases)==0:
return self.format % (key, help, default, actual)
else:
return self.format_ % (key, help, default, actual, aliases)
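# Minimal usage sketch (illustrative; assumes a normal SConstruct context
# where Environment, ARGUMENTS and Help are available):
#   vars = Variables('custom.py', ARGUMENTS)
#   vars.Add('CC', 'The C compiler')
#   env = Environment(variables=vars)
#   Help(vars.GenerateHelpText(env))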
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
snakeleon/YouCompleteMe-x86
|
refs/heads/master
|
third_party/ycmd/third_party/bottle/test/test_jinja2.py
|
50
|
# -*- coding: utf-8 -*-
import unittest
from bottle import Jinja2Template, jinja2_template, jinja2_view, touni
from tools import warn
class TestJinja2Template(unittest.TestCase):
def test_string(self):
""" Templates: Jinja2 string"""
t = Jinja2Template('start {{var}} end').render(var='var')
self.assertEqual('start var end', ''.join(t))
def test_file(self):
""" Templates: Jinja2 file"""
t = Jinja2Template(name='./views/jinja2_simple.tpl').render(var='var')
self.assertEqual('start var end', ''.join(t))
def test_name(self):
""" Templates: Jinja2 lookup by name """
t = Jinja2Template(name='jinja2_simple', lookup=['./views/']).render(var='var')
self.assertEqual('start var end', ''.join(t))
def test_notfound(self):
""" Templates: Unavailable templates"""
self.assertRaises(Exception, Jinja2Template, name="abcdef")
def test_error(self):
""" Templates: Exceptions"""
self.assertRaises(Exception, Jinja2Template, '{% for badsyntax')
def test_inherit(self):
""" Templates: Jinja2 lookup and inherience """
t = Jinja2Template(name='jinja2_inherit', lookup=['./views/']).render()
self.assertEqual('begin abc end', ''.join(t))
def test_custom_filters(self):
"""Templates: jinja2 custom filters """
from bottle import jinja2_template as template
settings = dict(filters = {"star": lambda var: touni("").join((touni('*'), var, touni('*')))})
t = Jinja2Template("start {{var|star}} end", **settings)
self.assertEqual("start *var* end", t.render(var="var"))
def test_custom_tests(self):
"""Templates: jinja2 custom tests """
from bottle import jinja2_template as template
TEMPL = touni("{% if var is even %}gerade{% else %}ungerade{% endif %}")
settings = dict(tests={"even": lambda x: False if x % 2 else True})
t = Jinja2Template(TEMPL, **settings)
self.assertEqual("gerade", t.render(var=2))
self.assertEqual("ungerade", t.render(var=1))
def test_template_shortcut(self):
result = jinja2_template('start {{var}} end', var='middle')
self.assertEqual(touni('start middle end'), result)
def test_view_decorator(self):
@jinja2_view('start {{var}} end')
def test():
return dict(var='middle')
self.assertEqual(touni('start middle end'), test())
try:
import jinja2
except ImportError:
warn("No Jinja2 template support. Skipping tests.")
del TestJinja2Template
if __name__ == '__main__': #pragma: no cover
unittest.main()
|
steveklabnik/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/html5lib/html5lib/treewalkers/pulldom.py
|
1729
|
from __future__ import absolute_import, division, unicode_literals
from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \
COMMENT, IGNORABLE_WHITESPACE, CHARACTERS
from . import _base
from ..constants import voidElements
class TreeWalker(_base.TreeWalker):
def __iter__(self):
ignore_until = None
previous = None
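        # Buffer one event of lookahead: a START_ELEMENT for a void element
        # is emitted as an EmptyTag, and ignore_until then suppresses events
        # up to (and including) its matching END_ELEMENT.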
for event in self.tree:
if previous is not None and \
(ignore_until is None or previous[1] is ignore_until):
if previous[1] is ignore_until:
ignore_until = None
for token in self.tokens(previous, event):
yield token
if token["type"] == "EmptyTag":
ignore_until = previous[1]
previous = event
if ignore_until is None or previous[1] is ignore_until:
for token in self.tokens(previous, None):
yield token
elif ignore_until is not None:
raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")
def tokens(self, event, next):
type, node = event
if type == START_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
attrs = {}
for attr in list(node.attributes.keys()):
attr = node.getAttributeNode(attr)
attrs[(attr.namespaceURI, attr.localName)] = attr.value
if name in voidElements:
for token in self.emptyTag(namespace,
name,
attrs,
not next or next[1] is not node):
yield token
else:
yield self.startTag(namespace, name, attrs)
elif type == END_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
if name not in voidElements:
yield self.endTag(namespace, name)
elif type == COMMENT:
yield self.comment(node.nodeValue)
elif type in (IGNORABLE_WHITESPACE, CHARACTERS):
for token in self.text(node.nodeValue):
yield token
else:
yield self.unknown(type)
|
anbasile/flask_sample
|
refs/heads/master
|
flask/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langgreekmodel.py
|
2762
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin7_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
win1253_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences:  98.2851%
# first 1024 sequences: 1.7001%
# rest sequences:       0.0359%
# negative sequences: 0.0148%
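# GreekLangModel is a flattened 64x64 precedence matrix: the entry at
# [order1 * 64 + order2] rates how typical the ordered character pair is
# (3 = very common ... 0 = rare/negative), indexed by the order values
# from the char-to-order maps above.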
GreekLangModel = (
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
Latin7GreekModel = {
'charToOrderMap': Latin7_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-7"
}
Win1253GreekModel = {
'charToOrderMap': win1253_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': False,
'charsetName': "windows-1253"
}
# flake8: noqa
|
ibinti/intellij-community
|
refs/heads/master
|
python/testData/refactoring/pullup/severalParents.after.py
|
80
|
class Spam:
pass
class Parent_1(object, Spam):
pass
class Parent_2():
pass
class Child(Parent_1, Parent_2):
pass
|
slisson/intellij-community
|
refs/heads/master
|
python/testData/completion/superInit.py
|
83
|
class Matrix(object):
def __init__(self):
pass
class Vector3(Matrix):
def __init__(self):
super(Vector3, self).__in<caret>
|
sunils34/buffer-django-nonrel
|
refs/heads/master
|
django/templatetags/l10n.py
|
247
|
from django.conf import settings
from django.template import Node
from django.template import TemplateSyntaxError, Library
from django.utils import formats
from django.utils.encoding import force_unicode
register = Library()
def localize(value):
"""
Forces a value to be rendered as a localized value,
regardless of the value of ``settings.USE_L10N``.
"""
return force_unicode(formats.localize(value, use_l10n=True))
localize.is_safe = False
def unlocalize(value):
"""
Forces a value to be rendered as a non-localized value,
regardless of the value of ``settings.USE_L10N``.
"""
return force_unicode(value)
unlocalize.is_safe = False
class LocalizeNode(Node):
def __init__(self, nodelist, use_l10n):
self.nodelist = nodelist
self.use_l10n = use_l10n
def __repr__(self):
return "<LocalizeNode>"
def render(self, context):
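        # Temporarily override the context's localization flag while rendering the block.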
old_setting = context.use_l10n
context.use_l10n = self.use_l10n
output = self.nodelist.render(context)
context.use_l10n = old_setting
return output
@register.tag('localize')
def localize_tag(parser, token):
"""
Forces or prevents localization of values, regardless of the value of
`settings.USE_L10N`.
Sample usage::
{% localize off %}
var pi = {{ 3.1415 }};
{% endlocalize %}
"""
use_l10n = None
bits = list(token.split_contents())
if len(bits) == 1:
use_l10n = True
elif len(bits) > 2 or bits[1] not in ('on', 'off'):
raise TemplateSyntaxError("%r argument should be 'on' or 'off'" % bits[0])
else:
use_l10n = bits[1] == 'on'
nodelist = parser.parse(('endlocalize',))
parser.delete_first_token()
return LocalizeNode(nodelist, use_l10n)
register.filter(localize)
register.filter(unlocalize)
|
akosyakov/intellij-community
|
refs/heads/master
|
python/testData/refactoring/extractmethod/ElseBody.after.py
|
79
|
def foo():
for arg in sys.argv[1:]:
try:
f = open(arg, 'r')
except IOError:
print('cannot open', arg)
else:
baz(f)
#anything else you need
def baz(f_new):
length = len(f_new.readlines()) # <---extract something from here
print("hi from else")
|
kuri65536/python-for-android
|
refs/heads/master
|
python-build/python-libs/gdata/samples/oauth/2_legged_oauth.py
|
128
|
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'e.bidelman (Eric Bidelman)'
import gdata.auth
import gdata.contacts
import gdata.contacts.service
import gdata.docs
import gdata.docs.service
CONSUMER_KEY = 'yourdomain.com'
CONSUMER_SECRET = 'YOUR_CONSUMER_SECRET'
SIG_METHOD = gdata.auth.OAuthSignatureMethod.HMAC_SHA1
requestor_id = 'any.user@yourdomain.com'
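# With 2-legged OAuth, the domain's consumer credentials sign every request and
# requestor_id names the user on whose behalf the requests are made.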
# Contacts Data API ============================================================
contacts = gdata.contacts.service.ContactsService()
contacts.SetOAuthInputParameters(
SIG_METHOD, CONSUMER_KEY, consumer_secret=CONSUMER_SECRET,
two_legged_oauth=True, requestor_id=requestor_id)
# GET - fetch user's contact list
print "\nList of contacts for %s:" % (requestor_id,)
feed = contacts.GetContactsFeed()
for entry in feed.entry:
print entry.title.text
# GET - fetch another user's contact list
requestor_id = 'another_user@yourdomain.com'
print "\nList of contacts for %s:" % (requestor_id,)
contacts.GetOAuthInputParameters().requestor_id = requestor_id
feed = contacts.GetContactsFeed()
for entry in feed.entry:
print entry.title.text
# Google Documents List Data API ===============================================
docs = gdata.docs.service.DocsService()
docs.SetOAuthInputParameters(
SIG_METHOD, CONSUMER_KEY, consumer_secret=CONSUMER_SECRET,
two_legged_oauth=True, requestor_id=requestor_id)
# POST - upload a document
print "\nUploading document to %s's Google Documents account:" % (requestor_id,)
ms = gdata.MediaSource(
file_path='/path/to/test.txt',
content_type=gdata.docs.service.SUPPORTED_FILETYPES['TXT'])
entry = docs.UploadDocument(ms, 'Company Perks')
print 'Document now accessible online at:', entry.GetAlternateLink().href
# GET - fetch user's document list
print "\nList of Google Documents for %s" % (requestor_id,)
feed = docs.GetDocumentListFeed()
for entry in feed.entry:
print entry.title.text
|
kemiz/tosca-vcloud-plugin
|
refs/heads/master
|
tests/unittests/test_mock_network_plugin_floatingip.py
|
2
|
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import mock
import unittest
from tests.unittests import test_mock_base
from network_plugin import floatingip
from cloudify import exceptions as cfy_exc
import network_plugin
import vcloud_plugin_common
class NetworkPluginFloatingIpMockTestCase(test_mock_base.TestBase):
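    """Unit tests for the floating IP operations of the vCloud network plugin."""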
def test_add_nat_rule_snat(self):
fake_ctx = self.generate_node_context()
with mock.patch('network_plugin.floatingip.ctx', fake_ctx):
gateway = mock.Mock()
            gateway.add_nat_rule = mock.MagicMock(return_value=None)
floatingip._add_nat_rule(
gateway, 'SNAT', 'internal', 'external'
)
gateway.add_nat_rule.assert_called_with(
'SNAT', 'internal', 'any', 'external', 'any', 'any'
)
def test_add_nat_rule_dnat(self):
fake_ctx = self.generate_node_context()
with mock.patch('network_plugin.floatingip.ctx', fake_ctx):
gateway = mock.Mock()
            gateway.add_nat_rule = mock.MagicMock(return_value=None)
floatingip._add_nat_rule(
gateway, 'DNAT', 'internal', 'external'
)
gateway.add_nat_rule.assert_called_with(
'DNAT', 'internal', 'any', 'external', 'any', 'any'
)
def test_del_nat_rule_snat(self):
fake_ctx = self.generate_node_context()
with mock.patch('network_plugin.floatingip.ctx', fake_ctx):
gateway = mock.Mock()
gateway.del_nat_rule = mock.MagicMock(return_value=None)
floatingip._del_nat_rule(
gateway, 'SNAT', 'internal', 'external'
)
gateway.del_nat_rule.assert_called_with(
'SNAT', 'internal', 'any', 'external', 'any', 'any'
)
def test_del_nat_rule_dnat(self):
fake_ctx = self.generate_node_context()
with mock.patch('network_plugin.floatingip.ctx', fake_ctx):
gateway = mock.Mock()
gateway.del_nat_rule = mock.MagicMock(return_value=None)
floatingip._del_nat_rule(
gateway, 'DNAT', 'internal', 'external'
)
gateway.del_nat_rule.assert_called_with(
'DNAT', 'internal', 'any', 'external', 'any', 'any'
)
def test_creation_validation(self):
fake_client = self.generate_client()
# no floating_ip
fake_ctx = self.generate_node_context(
properties={
'vcloud_config': {
'vdc': 'vdc_name'
}
}
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
floatingip.creation_validation(ctx=fake_ctx)
# no edge gateway
fake_ctx = self.generate_node_context(
properties={
'vcloud_config': {
'vdc': 'vdc_name'
},
'floatingip': {
'some_field': 'some value'
}
}
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
floatingip.creation_validation(ctx=fake_ctx)
# with edge gateway, but wrong ip
fake_ctx = self.generate_node_context(
properties={
'vcloud_config': {
'vdc': 'vdc_name'
},
'floatingip': {
'edge_gateway': 'gateway',
network_plugin.PUBLIC_IP: 'some'
}
}
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
floatingip.creation_validation(ctx=fake_ctx)
# with edge gateway, ip from pool
fake_ctx = self.generate_node_context(properties={
'vcloud_config': {
'vdc': 'vdc_name'
},
'floatingip': {
'edge_gateway': 'gateway',
'service_type': vcloud_plugin_common.ONDEMAND_SERVICE_TYPE
}
})
fake_client._vdc_gateway.get_public_ips = mock.MagicMock(
return_value=['10.18.1.1']
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
floatingip.creation_validation(ctx=fake_ctx)
# with some free ip
fake_ctx = self.generate_node_context(properties={
'vcloud_config': {
'vdc': 'vdc_name'
},
'floatingip': {
'edge_gateway': 'gateway',
network_plugin.PUBLIC_IP: '10.10.1.2',
'service_type': vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
}
})
fake_client._vdc_gateway.get_public_ips = mock.MagicMock(return_value=[
'10.1.1.1', '10.1.1.2'
])
rule_inlist = self.generate_nat_rule(
'DNAT', '10.1.1.1', 'any', '123.1.1.1', '11', 'TCP'
)
fake_client.get_nat_rules = mock.MagicMock(
return_value=[rule_inlist]
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
floatingip.creation_validation(ctx=fake_ctx)
def generate_client_and_context_floating_ip(
self, service_type=vcloud_plugin_common.ONDEMAND_SERVICE_TYPE
):
# client
vms_networks = [{
'is_connected': True,
'network_name': 'network_name',
'is_primary': True,
'ip': '1.1.1.1'
}]
fake_client = self.generate_client(vms_networks=vms_networks)
self.set_network_routed_in_client(fake_client)
self.set_services_conf_result(
fake_client._vdc_gateway,
vcloud_plugin_common.TASK_STATUS_SUCCESS
)
# ctx
fake_ctx = self.generate_relation_context()
fake_ctx._source.node.properties = {
'vcloud_config': {
'service_type': service_type,
'org': 'some_org',
'vdc': 'some_vdc',
}
}
fake_ctx._target.node.properties = {
'floatingip': {
'edge_gateway': 'gateway'
}
}
fake_ctx._target.instance.runtime_properties = {}
return fake_client, fake_ctx
def test_floatingip_operation_delete(self):
"""
check for floating_ip operations/delete
"""
# no public_ip delete
fake_client, fake_ctx = self.generate_client_and_context_floating_ip()
fake_ctx._target.node.properties = {
'floatingip': {
'edge_gateway': 'gateway'
}
}
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
with mock.patch(
'network_plugin.floatingip.ctx', fake_ctx
):
floatingip._floatingip_operation(
network_plugin.DELETE, fake_client, fake_ctx
)
# busy in save with ip in node_properties
fake_client, fake_ctx = self.generate_client_and_context_floating_ip()
self.set_services_conf_result(
fake_client._vdc_gateway, None
)
self.set_gateway_busy(fake_client._vdc_gateway)
self.prepare_retry(fake_ctx)
fake_ctx._target.node.properties = {
'floatingip': {
'edge_gateway': 'gateway',
network_plugin.PUBLIC_IP: '10.10.1.2'
}
}
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
with mock.patch(
'network_plugin.floatingip.ctx', fake_ctx
):
self.assertFalse(floatingip._floatingip_operation(
network_plugin.DELETE, fake_client, fake_ctx
))
# busy in save with ip in runtime_properties
fake_client, fake_ctx = self.generate_client_and_context_floating_ip()
self.set_services_conf_result(
fake_client._vdc_gateway, None
)
self.set_gateway_busy(fake_client._vdc_gateway)
self.prepare_retry(fake_ctx)
fake_ctx._target.node.properties = {
'floatingip': {
'edge_gateway': 'gateway'
}
}
fake_ctx._target.instance.runtime_properties = {
network_plugin.PUBLIC_IP: '10.10.1.2'
}
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
with mock.patch(
'network_plugin.floatingip.ctx', fake_ctx
):
self.assertFalse(floatingip._floatingip_operation(
network_plugin.DELETE, fake_client, fake_ctx
))
        # unknown operation
fake_client, fake_ctx = self.generate_client_and_context_floating_ip()
fake_ctx._target.node.properties = {
'floatingip': {
'edge_gateway': 'gateway',
network_plugin.PUBLIC_IP: '10.10.1.2'
}
}
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
with mock.patch(
'network_plugin.floatingip.ctx', fake_ctx
):
with self.assertRaises(cfy_exc.NonRecoverableError):
floatingip._floatingip_operation(
"unknow", fake_client, fake_ctx
)
# delete to end, ondemand
fake_client, fake_ctx = self.generate_client_and_context_floating_ip()
fake_ctx._target.node.properties = {
'floatingip': {
'edge_gateway': 'gateway'
}
}
fake_ctx._target.instance.runtime_properties = {
network_plugin.PUBLIC_IP: '10.10.1.2'
}
fake_client._vdc_gateway.deallocate_public_ip = mock.MagicMock(
return_value=self.generate_task(
vcloud_plugin_common.TASK_STATUS_SUCCESS
)
)
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
with mock.patch(
'network_plugin.floatingip.ctx', fake_ctx
):
floatingip._floatingip_operation(
network_plugin.DELETE, fake_client, fake_ctx
)
runtime_properties = fake_ctx._target.instance.runtime_properties
self.assertFalse(
network_plugin.PUBLIC_IP in runtime_properties
)
# delete to end, subscription
fake_client, fake_ctx = self.generate_client_and_context_floating_ip(
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
)
fake_ctx._target.node.properties = {
'floatingip': {
'edge_gateway': 'gateway'
}
}
fake_ctx._target.instance.runtime_properties = {
network_plugin.PUBLIC_IP: '10.10.1.2'
}
fake_client._vdc_gateway.deallocate_public_ip = mock.MagicMock(
return_value=self.generate_task(
vcloud_plugin_common.TASK_STATUS_SUCCESS
)
)
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
with mock.patch(
'network_plugin.floatingip.ctx', fake_ctx
):
floatingip._floatingip_operation(
network_plugin.DELETE, fake_client, fake_ctx
)
runtime_properties = fake_ctx._target.instance.runtime_properties
self.assertFalse(
network_plugin.PUBLIC_IP in runtime_properties
)
def test_disconnect_floatingip(self):
fake_client, fake_ctx = self.generate_client_and_context_floating_ip()
fake_ctx._target.node.properties = {
'floatingip': {
'edge_gateway': 'gateway'
}
}
fake_ctx._target.instance.runtime_properties = {
network_plugin.PUBLIC_IP: '10.10.1.2'
}
fake_client._vdc_gateway.deallocate_public_ip = mock.MagicMock(
return_value=self.generate_task(
vcloud_plugin_common.TASK_STATUS_SUCCESS
)
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
floatingip.disconnect_floatingip(
ctx=fake_ctx
)
runtime_properties = fake_ctx._target.instance.runtime_properties
self.assertFalse(
network_plugin.PUBLIC_IP in runtime_properties
)
def test_connect_floatingip(self):
"""
check connect_floatingip with explicitly defined ip
"""
fake_client, fake_ctx = self.generate_client_and_context_floating_ip(
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
)
fake_ctx._target.node.properties = {
'floatingip': {
'edge_gateway': 'gateway',
network_plugin.PUBLIC_IP: '10.10.2.3'
}
}
fake_ctx._target.instance.runtime_properties = {}
fake_client._vdc_gateway.get_public_ips = mock.MagicMock(return_value=[
'10.18.1.1', '10.10.2.3'
])
fake_client._vdc_gateway.get_nat_rules = mock.MagicMock(
return_value=[]
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
floatingip.connect_floatingip(
ctx=fake_ctx
)
runtime_properties = fake_ctx._target.instance.runtime_properties
self.assertTrue(
network_plugin.PUBLIC_IP in runtime_properties
)
self.assertEqual(
runtime_properties.get(network_plugin.PUBLIC_IP),
'10.10.2.3'
)
def test_floatingip_operation_create(self):
"""
check for floating_ip operations/create
"""
# create to end
fake_client, fake_ctx = self.generate_client_and_context_floating_ip(
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
)
fake_ctx._target.node.properties = {
'floatingip': {
'edge_gateway': 'gateway'
}
}
fake_ctx._target.instance.runtime_properties = {}
fake_client._vdc_gateway.get_public_ips = mock.MagicMock(return_value=[
'10.18.1.1'
])
fake_client._vdc_gateway.get_nat_rules = mock.MagicMock(
return_value=[]
)
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
with mock.patch(
'network_plugin.floatingip.ctx', fake_ctx
):
floatingip._floatingip_operation(
network_plugin.CREATE, fake_client, fake_ctx
)
runtime_properties = fake_ctx._target.instance.runtime_properties
self.assertTrue(
network_plugin.PUBLIC_IP in runtime_properties
)
self.assertEqual(
runtime_properties.get(network_plugin.PUBLIC_IP),
'10.18.1.1'
)
# with already explicitly defined ip
fake_client, fake_ctx = self.generate_client_and_context_floating_ip(
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
)
fake_ctx._target.node.properties = {
'floatingip': {
'edge_gateway': 'gateway',
network_plugin.PUBLIC_IP: '10.10.2.3'
}
}
fake_ctx._target.instance.runtime_properties = {}
fake_client._vdc_gateway.get_public_ips = mock.MagicMock(return_value=[
'10.18.1.1', '10.10.2.3'
])
fake_client._vdc_gateway.get_nat_rules = mock.MagicMock(
return_value=[]
)
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
with mock.patch(
'network_plugin.floatingip.ctx', fake_ctx
):
floatingip._floatingip_operation(
network_plugin.CREATE, fake_client, fake_ctx
)
runtime_properties = fake_ctx._target.instance.runtime_properties
self.assertTrue(
network_plugin.PUBLIC_IP in runtime_properties
)
self.assertEqual(
runtime_properties.get(network_plugin.PUBLIC_IP),
'10.10.2.3'
)
if __name__ == '__main__':
unittest.main()
|
public-ink/public-ink
|
refs/heads/master
|
server/appengine-staging/lib/graphql/validation/rules/lone_anonymous_operation.py
|
3
|
from ...error import GraphQLError
from ...language import ast
from .base import ValidationRule
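# Validation rule: an anonymous (unnamed) operation is only valid when it is
# the sole operation defined in the document.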
class LoneAnonymousOperation(ValidationRule):
__slots__ = 'operation_count',
def __init__(self, context):
self.operation_count = 0
super(LoneAnonymousOperation, self).__init__(context)
def enter_Document(self, node, key, parent, path, ancestors):
self.operation_count = \
sum(1 for definition in node.definitions if isinstance(definition, ast.OperationDefinition))
def enter_OperationDefinition(self, node, key, parent, path, ancestors):
if not node.name and self.operation_count > 1:
self.context.report_error(GraphQLError(self.anonymous_operation_not_alone_message(), [node]))
@staticmethod
def anonymous_operation_not_alone_message():
return 'This anonymous operation must be the only defined operation.'
|
andrius-preimantas/odoo
|
refs/heads/master
|
addons/website/tests/test_converter.py
|
61
|
# -*- coding: utf-8 -*-
import textwrap
import unittest2
from lxml import etree, html
from lxml.builder import E
from openerp.tests import common
from openerp.addons.base.ir import ir_qweb
from openerp.addons.website.models.ir_qweb import html_to_text
from openerp.addons.website.models.website import slugify, unslug
class TestUnslug(unittest2.TestCase):
def test_unslug(self):
tests = {
'': (None, None),
'foo': (None, None),
'foo-': (None, None),
'-': (None, None),
'foo-1': ('foo', 1),
'foo-bar-1': ('foo-bar', 1),
'foo--1': ('foo', -1),
'1': (None, 1),
'1-1': ('1', 1),
'--1': (None, None),
'foo---1': (None, None),
'foo1': (None, None),
}
for slug, expected in tests.iteritems():
self.assertEqual(unslug(slug), expected)
class TestHTMLToText(unittest2.TestCase):
def test_rawstring(self):
self.assertEqual(
"foobar",
html_to_text(E.div("foobar")))
def test_br(self):
self.assertEqual(
"foo\nbar",
html_to_text(E.div("foo", E.br(), "bar")))
self.assertEqual(
"foo\n\nbar\nbaz",
html_to_text(E.div(
"foo", E.br(), E.br(),
"bar", E.br(),
"baz")))
def test_p(self):
self.assertEqual(
"foo\n\nbar\n\nbaz",
html_to_text(E.div(
"foo",
E.p("bar"),
"baz")))
self.assertEqual(
"foo",
html_to_text(E.div(E.p("foo"))))
self.assertEqual(
"foo\n\nbar",
html_to_text(E.div("foo", E.p("bar"))))
self.assertEqual(
"foo\n\nbar",
html_to_text(E.div(E.p("foo"), "bar")))
self.assertEqual(
"foo\n\nbar\n\nbaz",
html_to_text(E.div(
E.p("foo"),
E.p("bar"),
E.p("baz"),
)))
def test_div(self):
self.assertEqual(
"foo\nbar\nbaz",
html_to_text(E.div(
"foo",
E.div("bar"),
"baz"
)))
self.assertEqual(
"foo",
html_to_text(E.div(E.div("foo"))))
self.assertEqual(
"foo\nbar",
html_to_text(E.div("foo", E.div("bar"))))
self.assertEqual(
"foo\nbar",
html_to_text(E.div(E.div("foo"), "bar")))
self.assertEqual(
"foo\nbar\nbaz",
html_to_text(E.div(
"foo",
E.div("bar"),
E.div("baz")
)))
def test_other_block(self):
self.assertEqual(
"foo\nbar\nbaz",
html_to_text(E.div(
"foo",
E.section("bar"),
"baz"
)))
def test_inline(self):
self.assertEqual(
"foobarbaz",
html_to_text(E.div("foo", E.span("bar"), "baz")))
def test_whitespace(self):
self.assertEqual(
"foo bar\nbaz",
html_to_text(E.div(
"foo\nbar",
E.br(),
"baz")
))
self.assertEqual(
"foo bar\nbaz",
html_to_text(E.div(
E.div(E.span("foo"), " bar"),
"baz")))
class TestConvertBack(common.TransactionCase):
def setUp(self):
super(TestConvertBack, self).setUp()
    def field_roundtrip_result(self, field, value, expected):
model = 'website.converter.test'
Model = self.registry(model)
id = Model.create(
self.cr, self.uid, {
field: value
})
[record] = Model.browse(self.cr, self.uid, [id])
e = etree.Element('span')
field_value = 'record.%s' % field
e.set('t-field', field_value)
rendered = self.registry('website.qweb').render_tag_field(
e, {'field': field_value}, '', ir_qweb.QWebContext(self.cr, self.uid, {
'record': record,
}, context={'inherit_branding': True}))
element = html.fromstring(
rendered, parser=html.HTMLParser(encoding='utf-8'))
column = Model._all_columns[field].column
converter = self.registry('website.qweb').get_converter_for(
element.get('data-oe-type'))
value_back = converter.from_html(
self.cr, self.uid, model, column, element)
if isinstance(expected, str):
expected = expected.decode('utf-8')
self.assertEqual(value_back, expected)
def field_roundtrip(self, field, value):
        self.field_roundtrip_result(field, value, value)
def test_integer(self):
self.field_roundtrip('integer', 42)
def test_float(self):
self.field_roundtrip('float', 42.567890)
self.field_roundtrip('float', 324542.567890)
def test_numeric(self):
self.field_roundtrip('numeric', 42.77)
def test_char(self):
self.field_roundtrip('char', "foo bar")
self.field_roundtrip('char', "ⒸⓄⓇⒼⒺ")
def test_selection(self):
self.field_roundtrip('selection', 3)
def test_selection_str(self):
self.field_roundtrip('selection_str', 'B')
def test_text(self):
self.field_roundtrip('text', textwrap.dedent("""\
You must obey the dance commander
Givin' out the order for fun
You must obey the dance commander
You know that he's the only one
Who gives the orders here,
Alright
Who gives the orders here,
Alright
It would be awesome
If we could dance-a
It would be awesome, yeah
Let's take the chance-a
It would be awesome, yeah
Let's start the show
Because you never know
You never know
You never know until you go"""))
def test_m2o(self):
""" the M2O field conversion (from html) is markedly different from
others as it directly writes into the m2o and returns nothing at all.
"""
model = 'website.converter.test'
field = 'many2one'
Sub = self.registry('website.converter.test.sub')
sub_id = Sub.create(self.cr, self.uid, {'name': "Foo"})
Model = self.registry(model)
id = Model.create(self.cr, self.uid, {field: sub_id})
[record] = Model.browse(self.cr, self.uid, [id])
e = etree.Element('span')
field_value = 'record.%s' % field
e.set('t-field', field_value)
rendered = self.registry('website.qweb').render_tag_field(
e, {'field': field_value}, '', ir_qweb.QWebContext(self.cr, self.uid, {
'record': record,
}, context={'inherit_branding': True}))
element = html.fromstring(rendered, parser=html.HTMLParser(encoding='utf-8'))
# emulate edition
element.text = "New content"
column = Model._all_columns[field].column
converter = self.registry('website.qweb').get_converter_for(
element.get('data-oe-type'))
value_back = converter.from_html(
self.cr, self.uid, model, column, element)
self.assertIsNone(
value_back, "the m2o converter should return None to avoid spurious"
" or useless writes on the parent record")
self.assertEqual(
Sub.browse(self.cr, self.uid, sub_id).name,
"New content",
"element edition should have been written directly to the m2o record"
)
class TestTitleToSlug(unittest2.TestCase):
"""
Those tests should pass with or without python-slugify
See website/models/website.py slugify method
"""
def test_spaces(self):
self.assertEqual(
"spaces",
slugify(u" spaces ")
)
def test_unicode(self):
self.assertEqual(
"heterogeneite",
slugify(u"hétérogénéité")
)
def test_underscore(self):
self.assertEqual(
"one-two",
slugify(u"one_two")
)
def test_caps(self):
self.assertEqual(
"camelcase",
slugify(u"CamelCase")
)
def test_special_chars(self):
self.assertEqual(
"o-d-o-o",
slugify(u"o!#d{|\o/@~o&%^?")
)
def test_str_to_unicode(self):
self.assertEqual(
"espana",
slugify("España")
)
def test_numbers(self):
self.assertEqual(
"article-1",
slugify(u"Article 1")
)
def test_all(self):
self.assertEqual(
"do-you-know-martine-a-la-plage",
slugify(u"Do YOU know 'Martine à la plage' ?")
)
|
lanen/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/jove.py
|
177
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
unified_strdate
)
class JoveIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?jove\.com/video/(?P<id>[0-9]+)'
_CHAPTERS_URL = 'http://www.jove.com/video-chapters?videoid={video_id:}'
_TESTS = [
{
'url': 'http://www.jove.com/video/2744/electrode-positioning-montage-transcranial-direct-current',
'md5': '93723888d82dbd6ba8b3d7d0cd65dd2b',
'info_dict': {
'id': '2744',
'ext': 'mp4',
'title': 'Electrode Positioning and Montage in Transcranial Direct Current Stimulation',
'description': 'md5:015dd4509649c0908bc27f049e0262c6',
'thumbnail': 're:^https?://.*\.png$',
'upload_date': '20110523',
}
},
{
'url': 'http://www.jove.com/video/51796/culturing-caenorhabditis-elegans-axenic-liquid-media-creation',
'md5': '914aeb356f416811d911996434811beb',
'info_dict': {
'id': '51796',
'ext': 'mp4',
'title': 'Culturing Caenorhabditis elegans in Axenic Liquid Media and Creation of Transgenic Worms by Microparticle Bombardment',
'description': 'md5:35ff029261900583970c4023b70f1dc9',
'thumbnail': 're:^https?://.*\.png$',
'upload_date': '20140802',
}
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
chapters_id = self._html_search_regex(
r'/video-chapters\?videoid=([0-9]+)', webpage, 'chapters id')
chapters_xml = self._download_xml(
self._CHAPTERS_URL.format(video_id=chapters_id),
video_id, note='Downloading chapters XML',
errnote='Failed to download chapters XML')
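        # The chapters XML exposes the direct video URL as an attribute of its root element.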
video_url = chapters_xml.attrib.get('video')
if not video_url:
raise ExtractorError('Failed to get the video URL')
title = self._html_search_meta('citation_title', webpage, 'title')
thumbnail = self._og_search_thumbnail(webpage)
description = self._html_search_regex(
r'<div id="section_body_summary"><p class="jove_content">(.+?)</p>',
webpage, 'description', fatal=False)
publish_date = unified_strdate(self._html_search_meta(
'citation_publication_date', webpage, 'publish date', fatal=False))
comment_count = self._html_search_regex(
r'<meta name="num_comments" content="(\d+) Comments?"',
webpage, 'comment count', fatal=False)
return {
'id': video_id,
'title': title,
'url': video_url,
'thumbnail': thumbnail,
'description': description,
'upload_date': publish_date,
'comment_count': comment_count,
}
|
hpfem/agros2d
|
refs/heads/master
|
resources/python/pyflakes/api.py
|
1
|
"""
API for the command-line I{pyflakes} tool.
"""
from __future__ import with_statement
import sys
import os
import _ast
from optparse import OptionParser
from pyflakes import checker, __version__
from pyflakes import reporter as modReporter
__all__ = ['check', 'checkPath', 'checkRecursive', 'iterSourceCode', 'main']
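# Python 2 needs the 'U' flag for universal newlines; Python 3's 'r' mode already translates them.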
universal_newline = ('U' if sys.version_info < (3, 0) else 'r')
def check(codeString, filename, reporter=None):
"""
Check the Python source given by C{codeString} for flakes.
@param codeString: The Python source to check.
@type codeString: C{str}
@param filename: The name of the file the source came from, used to report
errors.
@type filename: C{str}
@param reporter: A L{Reporter} instance, where errors and warnings will be
reported.
@return: The number of warnings emitted.
@rtype: C{int}
"""
if reporter is None:
reporter = modReporter._makeDefaultReporter()
# First, compile into an AST and handle syntax errors.
try:
tree = compile(codeString, filename, "exec", _ast.PyCF_ONLY_AST)
except SyntaxError:
value = sys.exc_info()[1]
msg = value.args[0]
(lineno, offset, text) = value.lineno, value.offset, value.text
# If there's an encoding problem with the file, the text is None.
if text is None:
# Avoid using msg, since for the only known case, it contains a
# bogus message that claims the encoding the file declared was
# unknown.
reporter.unexpectedError(filename, 'problem decoding source')
else:
reporter.syntaxError(filename, msg, lineno, offset, text)
return 1
except Exception:
reporter.unexpectedError(filename, 'problem decoding source')
return 1
# Okay, it's syntactically valid. Now check it.
w = checker.Checker(tree, filename)
w.messages.sort(key=lambda m: m.lineno)
for warning in w.messages:
reporter.flake(warning)
return len(w.messages)
def checkPath(filename, reporter=None):
"""
Check the given path, printing out any warnings detected.
@param reporter: A L{Reporter} instance, where errors and warnings will be
reported.
@return: the number of warnings printed
"""
if reporter is None:
reporter = modReporter._makeDefaultReporter()
try:
with open(filename, universal_newline) as f:
codestr = f.read() + '\n'
except UnicodeError:
reporter.unexpectedError(filename, 'problem decoding source')
return 1
except IOError:
msg = sys.exc_info()[1]
reporter.unexpectedError(filename, msg.args[1])
return 1
return check(codestr, filename, reporter)
def iterSourceCode(paths):
"""
Iterate over all Python source files in C{paths}.
@param paths: A list of paths. Directories will be recursed into and
any .py files found will be yielded. Any non-directories will be
yielded as-is.
"""
for path in paths:
if os.path.isdir(path):
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
if filename.endswith('.py'):
yield os.path.join(dirpath, filename)
else:
yield path
def checkRecursive(paths, reporter):
"""
Recursively check all source files in C{paths}.
@param paths: A list of paths to Python source files and directories
containing Python source files.
@param reporter: A L{Reporter} where all of the warnings and errors
will be reported to.
@return: The number of warnings found.
"""
warnings = 0
for sourcePath in iterSourceCode(paths):
warnings += checkPath(sourcePath, reporter)
return warnings
def main(prog=None):
parser = OptionParser(prog=prog, version=__version__)
(__, args) = parser.parse_args()
reporter = modReporter._makeDefaultReporter()
if args:
warnings = checkRecursive(args, reporter)
else:
warnings = check(sys.stdin.read(), '<stdin>', reporter)
raise SystemExit(warnings > 0)
|
jptomo/rpython-lang-scheme
|
refs/heads/master
|
rpython/tool/test/test_nullpath.py
|
2
|
import sys, os
import py
from rpython.tool.nullpath import NullPyPathLocal
def test_nullpath(tmpdir):
path = NullPyPathLocal(tmpdir)
assert repr(path).endswith('[fake]')
foo_txt = path.join('foo.txt')
assert isinstance(foo_txt, NullPyPathLocal)
#
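    # Opening a file for writing through the null path should land on os.devnull.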
f = foo_txt.open('w')
assert f.name == os.devnull
|
sfaci/fighter2d
|
refs/heads/master
|
fighter2d/BigEnemy.py
|
1
|
## BigEnemy
## Class implementing a bigger, tougher kind of enemy
##
## Author: Santiago Faci
## Version: 2.0
from direct.showbase.DirectObject import DirectObject
import random
from Enemy import Enemy
VELOCIDAD = -2
VELOCIDAD_MISIL = 10
Y = 55
FRECUENCIA_DISPARO = 0.2
PUNTOS = 200
VIDA = 4
class BigEnemy(Enemy):
def __init__(self, x0, z0, modelo):
        # Invoke the base class constructor
Enemy.__init__(self, x0, z0, modelo)
        # Initialize some attributes with different values
self.ship.setScale(4, 1, 3)
self.ia = False
self.vida = VIDA
self.puntos = PUNTOS
self.velocidad = VELOCIDAD
    # Object destructor
def __del__(self):
        pass
|
ckuethe/gr-chancoding
|
refs/heads/master
|
python/qa_chancoding_unpacked_to_packed_bvi.py
|
2
|
#!/usr/bin/env python
#
# Copyright 2011 Communications Engineering Lab, KIT
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import chancoding_swig as chancoding
class qa_unpack_bits (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_pack4bits (self):
""" Check packing 1 vector from 4 Bits """
src_data = (1,0,1,0)
expected_data = (5,)
src1 = gr.vector_source_b(src_data)
pack_bits = chancoding.unpacked_to_packed_bvi(4)
sink = gr.vector_sink_i()
self.tb.connect(src1, pack_bits)
self.tb.connect(pack_bits, sink)
self.tb.run()
self.assertEqual( sink.data() , expected_data)
def test_002_pack4bits_6_elements_input (self):
""" Check packing 4 Bits when an 6 Bits are available """
src_data = (1,0,0,1,1,0)
expected_data = (9,)
src1 = gr.vector_source_b(src_data)
pack_bits = chancoding.unpacked_to_packed_bvi(4)
sink = gr.vector_sink_i()
self.tb.connect(src1, pack_bits)
self.tb.connect(pack_bits, sink)
self.tb.run()
self.assertEqual( sink.data() , expected_data)
def test_003_pack8bits_non_zero (self):
""" Check that only the first 8 bits are used to build the int """
src_data = (1,0,1,1,0,0,0,0,1,0,0,1,0,0,0)
expected_data = (13,)
src1 = gr.vector_source_b(src_data)
pack_bits = chancoding.unpacked_to_packed_bvi(8)
sink = gr.vector_sink_i()
self.tb.connect(src1, pack_bits)
self.tb.connect(pack_bits, sink)
self.tb.run()
self.assertEqual( sink.data() , expected_data)
def test_004_pack8bits (self):
""" Check packing 8 bits multiple times """
src_data = (1,0,0,0,0,0,0,0,
1,1,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,
0,0,0,0,0,0,0,0,
1,0,0,1,0,0,0,0,
0,0,0,0,0,1,0,0,
1,1,0,1,0,1,0,0,
1,1,1,1,1,1,1,1)
expected_data = (1,3,4,0,9,32,43,255)
src1 = gr.vector_source_b(src_data)
pack_bits = chancoding.unpacked_to_packed_bvi(8)
sink = gr.vector_sink_i()
self.tb.connect(src1, pack_bits)
self.tb.connect(pack_bits, sink)
self.tb.run()
for i in range( len(expected_data) ):
self.assertEqual( sink.data()[i] , expected_data[i], i)
def test_005_pack32bits (self):
""" Check packing 32 bits with an input of 64 bits """
src_data = (1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)
expected_data = (1,1)
src1 = gr.vector_source_b(src_data)
pack_bits = chancoding.unpacked_to_packed_bvi(32)
sink = gr.vector_sink_i()
self.tb.connect(src1, pack_bits)
self.tb.connect(pack_bits, sink)
self.tb.run()
for i in range( len(expected_data) ):
self.assertEqual( sink.data()[i] , expected_data[i], i)
def test_006_pack33bits (self):
""" Check packing 33 bits two times """
src_data = (1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1)
expected_data = (3,1,3,1)
src1 = gr.vector_source_b(src_data)
pack_bits = chancoding.unpacked_to_packed_bvi(33)
sink = gr.vector_sink_i(2)
self.tb.connect(src1, pack_bits)
self.tb.connect(pack_bits, sink)
self.tb.run()
for i in range( len(expected_data) ):
self.assertEqual( sink.data()[i] , expected_data[i], i)
def test_007_pack64bits (self):
""" Check packing 64 bits two times """
src_data = (1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)
expected_data = (1,5,2,10)
src1 = gr.vector_source_b(src_data)
pack_bits = chancoding.unpacked_to_packed_bvi(64)
sink = gr.vector_sink_i(2)
self.tb.connect(src1, pack_bits)
self.tb.connect(pack_bits, sink)
self.tb.run()
for i in range( len(expected_data) ):
self.assertEqual( sink.data()[i] , expected_data[i], i)
def test_008_pack65bits (self):
""" Check packing 65 bits two times """
src_data = (1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)
expected_data = (1,5,1,2,10,0)
src1 = gr.vector_source_b(src_data)
pack_bits = chancoding.unpacked_to_packed_bvi(65)
sink = gr.vector_sink_i(3)
self.tb.connect(src1, pack_bits)
self.tb.connect(pack_bits, sink)
self.tb.run()
for i in range( len(expected_data) ):
self.assertEqual( sink.data()[i] , expected_data[i], i)
# def test_009_packing_unpacking_random1000 (self):
# """ Check packing 32 bits randomly created 1000 times """
# src_data = (1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
# 0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)
# expected_data = (1,5,2,10)
# src1 = gr.glfsr_source_b(int degree, 0, int mask, int seed)
# pack_bits = chancoding.unpacked_to_packed_bvi(32)
# sink = gr.vector_sink_i()
# self.tb.connect(src1, pack_bits)
# self.tb.connect(pack_bits, sink)
# self.tb.run()
# for i in range( len(expected_data) ):
# self.assertEqual(int( sink.data()[i] ), expected_data[i], i)
if __name__ == '__main__':
gr_unittest.main ()
|
personalunion/glimpse_client-1
|
refs/heads/develop
|
3rdparty/breakpad/src/tools/gyp/test/win/gyptest-link-uldi.py
|
344
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure that when ULDI is on, we link .objs that make up .libs rather than
the .libs themselves.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'uldi'
test.run_gyp('uldi.gyp', chdir=CHDIR)
# When linking with ULDI, the duplicated function from the lib will be an
# error.
test.build('uldi.gyp', 'final_uldi', chdir=CHDIR, status=1)
# And when in libs, the duplicated function will be silently dropped, so the
# build succeeds.
test.build('uldi.gyp', 'final_no_uldi', chdir=CHDIR)
test.pass_test()
|
gtaylor/btmux_maplib
|
refs/heads/master
|
btmux_maplib/constants.py
|
1
|
"""
Common constants that the various modules refer to.
"""
TERRAIN_NAMES = {
" ": "Grassland",
".": "Grassland",
"#": "Road",
"%": "Rough",
"/": "Bridge",
"@": "Building",
"^": "Mountain",
"~": "Water",
"&": "Fire",
"{": "Sand",
"`": "Light_Forest",
"'": "Light_Forest",
"\"": "Heavy_Forest",
"=": "Wall"
}
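# Valid hex elevations run from 0 through 9.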
ELEVATIONS = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
|
seanjensengrey/moveonpc
|
refs/heads/master
|
examples/python/pair.py
|
7
|
#
# PS Move API - An interface for the PS Move Motion Controller
# Copyright (c) 2011 Thomas Perl <m@thp.io>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'build'))
import psmove
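# Open a handle to the first connected PS Move controller.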
move = psmove.PSMove()
if move.connection_type == psmove.Conn_Bluetooth:
print('bluetooth')
elif move.connection_type == psmove.Conn_USB:
print('usb')
else:
print('unknown')
if len(sys.argv) == 2:
result = move.pair_custom(sys.argv[-1])
else:
result = move.pair()
if result:
print('successfully paired :)')
else:
print('pairing failed :/')
|
erikr/django
|
refs/heads/master
|
django/db/migrations/state.py
|
4
|
from __future__ import unicode_literals
import copy
import warnings
from collections import OrderedDict
from contextlib import contextmanager
from django.apps import AppConfig
from django.apps.registry import Apps, apps as global_apps
from django.conf import settings
from django.db import models
from django.db.models.fields.proxy import OrderWrt
from django.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT
from django.db.models.options import DEFAULT_NAMES, normalize_together
from django.db.models.utils import make_model_tuple
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from django.utils.version import get_docs_version
from .exceptions import InvalidBasesError
def _get_app_label_and_model_name(model, app_label=''):
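    """Return an (app_label, model_name) pair from a model class or string reference."""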
if isinstance(model, six.string_types):
split = model.split('.', 1)
return (tuple(split) if len(split) == 2 else (app_label, split[0]))
else:
return model._meta.app_label, model._meta.model_name
def _get_related_models(m):
"""
Return all models that have a direct relationship to the given model.
"""
related_models = [
subclass for subclass in m.__subclasses__()
if issubclass(subclass, models.Model)
]
related_fields_models = set()
for f in m._meta.get_fields(include_parents=True, include_hidden=True):
if f.is_relation and f.related_model is not None and not isinstance(f.related_model, six.string_types):
related_fields_models.add(f.model)
related_models.append(f.related_model)
# Reverse accessors of foreign keys to proxy models are attached to their
# concrete proxied model.
opts = m._meta
if opts.proxy and m in related_fields_models:
related_models.append(opts.concrete_model)
return related_models
def get_related_models_recursive(model):
"""
Return all models that have a direct or indirect relationship
to the given model.
Relationships are either defined by explicit relational fields, like
ForeignKey, ManyToManyField or OneToOneField, or by inheriting from another
model (a superclass is related to its subclasses, but not vice versa). Note,
however, that a model inheriting from a concrete model is also related to
its superclass through the implicit *_ptr OneToOneField on the subclass.
"""
seen = set()
queue = _get_related_models(model)
for rel_mod in queue:
rel_app_label, rel_model_name = rel_mod._meta.app_label, rel_mod._meta.model_name
if (rel_app_label, rel_model_name) in seen:
continue
seen.add((rel_app_label, rel_model_name))
queue.extend(_get_related_models(rel_mod))
return seen - {(model._meta.app_label, model._meta.model_name)}
class ProjectState(object):
"""
Represents the entire project's overall state.
This is the item that is passed around - we do it here rather than at the
app level so that cross-app FKs/etc. resolve properly.
"""
def __init__(self, models=None, real_apps=None):
self.models = models or {}
# Apps to include from main registry, usually unmigrated ones
self.real_apps = real_apps or []
def add_model(self, model_state):
app_label, model_name = model_state.app_label, model_state.name_lower
self.models[(app_label, model_name)] = model_state
if 'apps' in self.__dict__: # hasattr would cache the property
self.reload_model(app_label, model_name)
def remove_model(self, app_label, model_name):
del self.models[app_label, model_name]
if 'apps' in self.__dict__: # hasattr would cache the property
self.apps.unregister_model(app_label, model_name)
# Need to do this explicitly since unregister_model() doesn't clear
# the cache automatically (#24513)
self.apps.clear_cache()
def reload_model(self, app_label, model_name):
if 'apps' in self.__dict__: # hasattr would cache the property
try:
old_model = self.apps.get_model(app_label, model_name)
except LookupError:
related_models = set()
else:
# Get all relations to and from the old model before reloading,
# as _meta.apps may change
related_models = get_related_models_recursive(old_model)
# Get all outgoing references from the model to be rendered
model_state = self.models[(app_label, model_name)]
# Directly related models are the models pointed to by ForeignKeys,
# OneToOneFields, and ManyToManyFields.
direct_related_models = set()
for name, field in model_state.fields:
if field.is_relation:
if field.remote_field.model == RECURSIVE_RELATIONSHIP_CONSTANT:
continue
rel_app_label, rel_model_name = _get_app_label_and_model_name(field.related_model, app_label)
direct_related_models.add((rel_app_label, rel_model_name.lower()))
# For all direct related models recursively get all related models.
related_models.update(direct_related_models)
for rel_app_label, rel_model_name in direct_related_models:
try:
rel_model = self.apps.get_model(rel_app_label, rel_model_name)
except LookupError:
pass
else:
related_models.update(get_related_models_recursive(rel_model))
# Include the model itself
related_models.add((app_label, model_name))
# Unregister all related models
with self.apps.bulk_update():
for rel_app_label, rel_model_name in related_models:
self.apps.unregister_model(rel_app_label, rel_model_name)
states_to_be_rendered = []
# Gather all models states of those models that will be rerendered.
# This includes:
# 1. All related models of unmigrated apps
for model_state in self.apps.real_models:
if (model_state.app_label, model_state.name_lower) in related_models:
states_to_be_rendered.append(model_state)
# 2. All related models of migrated apps
for rel_app_label, rel_model_name in related_models:
try:
model_state = self.models[rel_app_label, rel_model_name]
except KeyError:
pass
else:
states_to_be_rendered.append(model_state)
# Render all models
self.apps.render_multiple(states_to_be_rendered)
def clone(self):
"Returns an exact copy of this ProjectState"
new_state = ProjectState(
models={k: v.clone() for k, v in self.models.items()},
real_apps=self.real_apps,
)
if 'apps' in self.__dict__:
new_state.apps = self.apps.clone()
return new_state
@cached_property
def apps(self):
return StateApps(self.real_apps, self.models)
@property
def concrete_apps(self):
self.apps = StateApps(self.real_apps, self.models, ignore_swappable=True)
return self.apps
@classmethod
def from_apps(cls, apps):
"Takes in an Apps and returns a ProjectState matching it"
app_models = {}
for model in apps.get_models(include_swapped=True):
model_state = ModelState.from_model(model)
app_models[(model_state.app_label, model_state.name_lower)] = model_state
return cls(app_models)
def __eq__(self, other):
if set(self.models.keys()) != set(other.models.keys()):
return False
if set(self.real_apps) != set(other.real_apps):
return False
return all(model == other.models[key] for key, model in self.models.items())
def __ne__(self, other):
return not (self == other)
class AppConfigStub(AppConfig):
"""
Stubs a Django AppConfig. Only provides a label, and a dict of models.
"""
# Not used, but required by AppConfig.__init__
path = ''
def __init__(self, label):
self.label = label
# App-label and app-name are not the same thing, so technically passing
# in the label here is wrong. In practice, migrations don't care about
# the app name, but we need something unique, and the label works fine.
super(AppConfigStub, self).__init__(label, None)
def import_models(self):
self.models = self.apps.all_models[self.label]
class StateApps(Apps):
"""
Subclass of the global Apps registry class to better handle dynamic model
additions and removals.
"""
def __init__(self, real_apps, models, ignore_swappable=False):
# Any apps in self.real_apps should have all their models included
# in the render. We don't use the original model instances as there
# are some variables that refer to the Apps object.
# FKs/M2Ms from real apps are also not included as they just
# mess things up with partial states (due to lack of dependencies)
self.real_models = []
for app_label in real_apps:
app = global_apps.get_app_config(app_label)
for model in app.get_models():
self.real_models.append(ModelState.from_model(model, exclude_rels=True))
# Populate the app registry with a stub for each application.
app_labels = {model_state.app_label for model_state in models.values()}
app_configs = [AppConfigStub(label) for label in sorted(real_apps + list(app_labels))]
super(StateApps, self).__init__(app_configs)
# The lock gets in the way of copying as implemented in clone(), which
# is called whenever Django duplicates a StateApps before updating it.
self._lock = None
self.render_multiple(list(models.values()) + self.real_models)
# There shouldn't be any operations pending at this point.
from django.core.checks.model_checks import _check_lazy_references
ignore = {make_model_tuple(settings.AUTH_USER_MODEL)} if ignore_swappable else set()
errors = _check_lazy_references(self, ignore=ignore)
if errors:
raise ValueError("\n".join(error.msg for error in errors))
@contextmanager
def bulk_update(self):
# Avoid clearing each model's cache for each change. Instead, clear
# all caches when we're finished updating the model instances.
ready = self.ready
self.ready = False
try:
yield
finally:
self.ready = ready
self.clear_cache()
def render_multiple(self, model_states):
        # We keep trying to render the models in a loop, ignoring invalid
        # base errors, until the set of unrendered models stops shrinking,
        # which means there is a base dependency loop or a missing base.
if not model_states:
return
        # Avoid expiring every model cache on each render.
with self.bulk_update():
unrendered_models = model_states
while unrendered_models:
new_unrendered_models = []
for model in unrendered_models:
try:
model.render(self)
except InvalidBasesError:
new_unrendered_models.append(model)
if len(new_unrendered_models) == len(unrendered_models):
raise InvalidBasesError(
"Cannot resolve bases for %r\nThis can happen if you are inheriting models from an "
"app with migrations (e.g. contrib.auth)\n in an app with no migrations; see "
"https://docs.djangoproject.com/en/%s/topics/migrations/#dependencies "
"for more" % (new_unrendered_models, get_docs_version())
)
unrendered_models = new_unrendered_models
def clone(self):
"""
Return a clone of this registry, mainly used by the migration framework.
"""
clone = StateApps([], {})
clone.all_models = copy.deepcopy(self.all_models)
clone.app_configs = copy.deepcopy(self.app_configs)
# Set the pointer to the correct app registry.
for app_config in clone.app_configs.values():
app_config.apps = clone
# No need to actually clone them, they'll never change
clone.real_models = self.real_models
return clone
def register_model(self, app_label, model):
self.all_models[app_label][model._meta.model_name] = model
if app_label not in self.app_configs:
self.app_configs[app_label] = AppConfigStub(app_label)
self.app_configs[app_label].apps = self
self.app_configs[app_label].models = OrderedDict()
self.app_configs[app_label].models[model._meta.model_name] = model
self.do_pending_operations(model)
self.clear_cache()
def unregister_model(self, app_label, model_name):
try:
del self.all_models[app_label][model_name]
del self.app_configs[app_label].models[model_name]
except KeyError:
pass
class ModelState(object):
"""
Represents a Django Model. We don't use the actual Model class
as it's not designed to have its options changed - instead, we
mutate this one and then render it into a Model as required.
Note that while you are allowed to mutate .fields, you are not allowed
to mutate the Field instances inside there themselves - you must instead
assign new ones, as these are not detached during a clone.
"""
def __init__(self, app_label, name, fields, options=None, bases=None, managers=None):
self.app_label = app_label
self.name = force_text(name)
self.fields = fields
self.options = options or {}
self.options.setdefault('indexes', [])
self.bases = bases or (models.Model, )
self.managers = managers or []
# Sanity-check that fields is NOT a dict. It must be ordered.
if isinstance(self.fields, dict):
raise ValueError("ModelState.fields cannot be a dict - it must be a list of 2-tuples.")
for name, field in fields:
# Sanity-check that fields are NOT already bound to a model.
if hasattr(field, 'model'):
raise ValueError(
'ModelState.fields cannot be bound to a model - "%s" is.' % name
)
# Sanity-check that relation fields are NOT referring to a model class.
if field.is_relation and hasattr(field.related_model, '_meta'):
raise ValueError(
'ModelState.fields cannot refer to a model class - "%s.to" does. '
'Use a string reference instead.' % name
)
if field.many_to_many and hasattr(field.remote_field.through, '_meta'):
raise ValueError(
'ModelState.fields cannot refer to a model class - "%s.through" does. '
'Use a string reference instead.' % name
)
# Sanity-check that indexes have their name set.
for index in self.options['indexes']:
if not index.name:
raise ValueError(
"Indexes passed to ModelState require a name attribute. "
"%r doesn't have one." % index
)
@cached_property
def name_lower(self):
return self.name.lower()
@classmethod
def from_model(cls, model, exclude_rels=False):
"""
Feed me a model, get a ModelState representing it out.
"""
# Deconstruct the fields
fields = []
for field in model._meta.local_fields:
if getattr(field, "remote_field", None) and exclude_rels:
continue
if isinstance(field, OrderWrt):
continue
name = force_text(field.name, strings_only=True)
try:
fields.append((name, field.clone()))
except TypeError as e:
raise TypeError("Couldn't reconstruct field %s on %s: %s" % (
name,
model._meta.label,
e,
))
if not exclude_rels:
for field in model._meta.local_many_to_many:
name = force_text(field.name, strings_only=True)
try:
fields.append((name, field.clone()))
except TypeError as e:
raise TypeError("Couldn't reconstruct m2m field %s on %s: %s" % (
name,
model._meta.object_name,
e,
))
# Extract the options
options = {}
for name in DEFAULT_NAMES:
# Ignore some special options
if name in ["apps", "app_label"]:
continue
elif name in model._meta.original_attrs:
if name == "unique_together":
ut = model._meta.original_attrs["unique_together"]
options[name] = set(normalize_together(ut))
elif name == "index_together":
it = model._meta.original_attrs["index_together"]
options[name] = set(normalize_together(it))
else:
options[name] = model._meta.original_attrs[name]
# Force-convert all options to text_type (#23226)
options = cls.force_text_recursive(options)
# If we're ignoring relationships, remove all field-listing model
# options (that option basically just means "make a stub model")
if exclude_rels:
for key in ["unique_together", "index_together", "order_with_respect_to"]:
if key in options:
del options[key]
# Private fields are ignored, so remove options that refer to them.
elif options.get('order_with_respect_to') in {field.name for field in model._meta.private_fields}:
del options['order_with_respect_to']
def flatten_bases(model):
bases = []
for base in model.__bases__:
if hasattr(base, "_meta") and base._meta.abstract:
bases.extend(flatten_bases(base))
else:
bases.append(base)
return bases
# We can't rely on __mro__ directly because we only want to flatten
# abstract models and not the whole tree. However by recursing on
# __bases__ we may end up with duplicates and ordering issues, we
# therefore discard any duplicates and reorder the bases according
# to their index in the MRO.
flattened_bases = sorted(set(flatten_bases(model)), key=lambda x: model.__mro__.index(x))
# Make our record
bases = tuple(
(
base._meta.label_lower
if hasattr(base, "_meta") else
base
)
for base in flattened_bases
)
# Ensure at least one base inherits from models.Model
if not any((isinstance(base, six.string_types) or issubclass(base, models.Model)) for base in bases):
bases = (models.Model,)
managers = []
manager_names = set()
default_manager_shim = None
for manager in model._meta.managers:
manager_name = force_text(manager.name)
if manager_name in manager_names:
# Skip overridden managers.
continue
elif manager.use_in_migrations:
# Copy managers usable in migrations.
new_manager = copy.copy(manager)
new_manager._set_creation_counter()
elif manager is model._base_manager or manager is model._default_manager:
# Shim custom managers used as default and base managers.
new_manager = models.Manager()
new_manager.model = manager.model
new_manager.name = manager.name
if manager is model._default_manager:
default_manager_shim = new_manager
else:
continue
manager_names.add(manager_name)
managers.append((manager_name, new_manager))
# Ignore a shimmed default manager called objects if it's the only one.
if managers == [('objects', default_manager_shim)]:
managers = []
# Construct the new ModelState
return cls(
model._meta.app_label,
model._meta.object_name,
fields,
options,
bases,
managers,
)
@classmethod
def force_text_recursive(cls, value):
if isinstance(value, six.string_types):
return force_text(value)
elif isinstance(value, list):
return [cls.force_text_recursive(x) for x in value]
elif isinstance(value, tuple):
return tuple(cls.force_text_recursive(x) for x in value)
elif isinstance(value, set):
return set(cls.force_text_recursive(x) for x in value)
elif isinstance(value, dict):
return {
cls.force_text_recursive(k): cls.force_text_recursive(v)
for k, v in value.items()
}
return value
def construct_managers(self):
"Deep-clone the managers using deconstruction"
# Sort all managers by their creation counter
sorted_managers = sorted(self.managers, key=lambda v: v[1].creation_counter)
for mgr_name, manager in sorted_managers:
mgr_name = force_text(mgr_name)
as_manager, manager_path, qs_path, args, kwargs = manager.deconstruct()
if as_manager:
qs_class = import_string(qs_path)
yield mgr_name, qs_class.as_manager()
else:
manager_class = import_string(manager_path)
yield mgr_name, manager_class(*args, **kwargs)
def clone(self):
"Returns an exact copy of this ModelState"
return self.__class__(
app_label=self.app_label,
name=self.name,
fields=list(self.fields),
options=dict(self.options),
bases=self.bases,
managers=list(self.managers),
)
def render(self, apps):
"Creates a Model object from our current state into the given apps"
# First, make a Meta object
meta_contents = {'app_label': self.app_label, "apps": apps}
meta_contents.update(self.options)
meta = type(str("Meta"), tuple(), meta_contents)
# Then, work out our bases
try:
bases = tuple(
(apps.get_model(base) if isinstance(base, six.string_types) else base)
for base in self.bases
)
except LookupError:
raise InvalidBasesError("Cannot resolve one or more bases from %r" % (self.bases,))
# Turn fields into a dict for the body, add other bits
body = {name: field.clone() for name, field in self.fields}
body['Meta'] = meta
body['__module__'] = "__fake__"
# Restore managers
body.update(self.construct_managers())
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", "Managers from concrete parents will soon qualify as default managers",
RemovedInDjango20Warning)
# Then, make a Model object (apps.register_model is called in __new__)
return type(
str(self.name),
bases,
body,
)
def get_field_by_name(self, name):
for fname, field in self.fields:
if fname == name:
return field
raise ValueError("No field called %s on model %s" % (name, self.name))
def get_index_by_name(self, name):
for index in self.options['indexes']:
if index.name == name:
return index
raise ValueError("No index named %s on model %s" % (name, self.name))
def __repr__(self):
return "<%s: '%s.%s'>" % (self.__class__.__name__, self.app_label, self.name)
def __eq__(self, other):
return (
(self.app_label == other.app_label) and
(self.name == other.name) and
(len(self.fields) == len(other.fields)) and
all((k1 == k2 and (f1.deconstruct()[1:] == f2.deconstruct()[1:]))
for (k1, f1), (k2, f2) in zip(self.fields, other.fields)) and
(self.options == other.options) and
(self.bases == other.bases) and
(self.managers == other.managers)
)
def __ne__(self, other):
return not (self == other)
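# --- Editor's usage sketch (not part of Django source): how ModelState is
# typically consumed. Assumes a configured Django project; the app label and
# model names below are hypothetical.
#
#     from django.db.migrations.state import ModelState, ProjectState
#
#     state = ModelState.from_model(Author)       # snapshot a model class
#     state = state.clone()                       # cheap, mutation-safe copy
#     state.options['verbose_name'] = 'writer'    # mutate options, not Fields
#     project_state = ProjectState()
#     project_state.add_model(state)
#     Writer = project_state.apps.get_model('library', 'author')  # re-render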
|
nirb/whatsapp
|
refs/heads/master
|
build/lib/yowsup/layers/protocol_iq/__init__.py
|
70
|
from .layer import YowIqProtocolLayer
|
claudelee/bilibili-api
|
refs/heads/master
|
GetDanmuAss/biclass.py
|
3
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 28 01:22:20 2014
@author: Administrator
"""
class User():
def __init__(self,m_mid=None,m_name=None):
if m_mid:
self.mid = m_mid;
if m_name:
self.name = m_name;
def saveToFile(self,fid):
fid.write('名字:%s\n'%self.name);
fid.write('id:%s\n'%self.mid);
fid.write('是否认证:%s\n'%self.isApprove);
fid.write('空间:%s\n'%self.spaceName);
fid.write('性别:%s\n'%self.sex);
fid.write('账号显示标识:%s\n'%self.rank);
fid.write('头像:%s\n'%self.avatar);
fid.write('关注好友数目:%d\n'%self.follow);
fid.write('粉丝数目:%d\n'%self.fans);
fid.write('投稿数:%d\n'%self.article);
fid.write('地点:%s\n'%self.place);
fid.write('认证信息:%s\n'%self.description);
fid.write('关注好友:\n');
if self.followlist:
for fo in self.followlist:
fid.write('\t%s\n'%fo);
    # Get the URL of this user's space (profile page)
def GetSpace(self):
return 'http://space.bilibili.tv/'+str(self.mid);
mid = None;
name = None;
    isApprove = False;  # whether this is a verified account
spaceName = "";
sex = ""
rank = None;
avatar = None;
    follow = 0;  # number of users followed
    fans = 0;  # number of fans (followers)
    article = 0;  # number of uploads
    place = None;  # location
    description = None;  # verification info for verified users; personal statement for ordinary users
    followlist = None;  # list of followed users
class Video():
def __init__(self,m_aid=None,m_title=None):
if m_aid:
self.aid = m_aid;
if m_title:
self.title = m_title;
    # Write the video info to a file
def saveToFile(self,fid):
fid.write('av号:%d\n'%self.aid);
fid.write('标题:%s\n'%self.title);
fid.write('观看:%d\n'%self.guankan);
fid.write('收藏:%d\n'%self.shoucang);
fid.write('弹幕:%d\n'%self.danmu);
fid.write('日期:%s\n'%self.date);
fid.write('封面地址:%s\n'%self.cover);
fid.write('Up主:\n');
self.author.saveToFile(fid);
fid.write('\n');
aid = None;
title = None;
guankan = None;
shoucang = None;
danmu = None;
date = None;
cover = None;
commentNumber = None;
description = None;
tag = None;
author = None;
page = None;
credit = None;
coin = None;
spid = None;
cid = None;
    offsite = None;  # URL used to invoke the Flash player
Iscopy = None;
subtitle = None;
duration = None;
episode = None;
    # Meaning unclear:
tid = None;
typename = None;
instant_server = None;
src = None;
partname = None;
    # Playback info:
play_site = None;
play_forward = None;
play_mobile = None;
class Bangumi():
def __init__(self):
pass;
typeid = None;
lastupdate = None;
areaid = None;
    bgmcount = None;  # current total number of episodes in the series
title = None;
lastupdate_at = None;
attention = None;
cover = None;
priority = None;
area = None;
weekday = None;
spid = None;
new = None;
scover = None;
mcover = None;
click = None;
season_id = None;
class Comment():
def __init__(self):
self.post_user = User();
    lv = None;  # floor number of the comment
    fbid = None;  # comment id
    msg = None;
    ad_check = None;  # status (0: normal, 1: hidden by uploader, 2: deleted by admin, 3: deleted after reports)
    post_user = None;
class CommentList():
def __init__(self):
pass;
comments = None;
commentLen = None;
page = None;
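# --- Editor's usage sketch (not part of the original module); the mid below is
# a made-up example id.
if __name__ == '__main__':
    u = User(m_mid=10086, m_name='example')
    print(u.GetSpace())  # -> http://space.bilibili.tv/10086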
|
GiladE/birde
|
refs/heads/master
|
venv/lib/python2.7/site-packages/django/contrib/gis/db/models/lookups.py
|
48
|
from django.db.models.lookups import Lookup
from django.db.models.sql.expressions import SQLEvaluator
class GISLookup(Lookup):
def as_sql(self, qn, connection):
from django.contrib.gis.db.models.sql import GeoWhereNode
# We use the same approach as was used by GeoWhereNode. It would
# be a good idea to upgrade GIS to use similar code that is used
# for other lookups.
if isinstance(self.rhs, SQLEvaluator):
# Make sure the F Expression destination field exists, and
# set an `srid` attribute with the same as that of the
# destination.
geo_fld = GeoWhereNode._check_geo_field(self.rhs.opts, self.rhs.expression.name)
if not geo_fld:
raise ValueError('No geographic field found in expression.')
self.rhs.srid = geo_fld.srid
db_type = self.lhs.output_field.db_type(connection=connection)
params = self.lhs.output_field.get_db_prep_lookup(
self.lookup_name, self.rhs, connection=connection)
lhs_sql, lhs_params = self.process_lhs(qn, connection)
# lhs_params not currently supported.
assert not lhs_params
data = (lhs_sql, db_type)
spatial_sql, spatial_params = connection.ops.spatial_lookup_sql(
data, self.lookup_name, self.rhs, self.lhs.output_field, qn)
return spatial_sql, spatial_params + params
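# --- Editor's sketch (not part of Django): a GISLookup is reached through
# ordinary ORM filters; the model and geometry below are hypothetical.
#
#     City.objects.filter(point__contained=some_polygon)
#
# The 'contained' lookup resolves to a GISLookup whose as_sql() delegates the
# backend-specific SQL to connection.ops.spatial_lookup_sql().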
|
numenta/nupic.cloudbrain
|
refs/heads/master
|
htmclassifier/labeled_data_generator.py
|
1
|
import csv

# Build a labeled training CSV: rows from the "normal" set get label 0 and
# rows from the "meditation" set get label 1.
label_dataset_file = open('training_data.csv', 'wb')
med_data_file = open('training_set_meditation.csv', 'rb')
normal_data_file = open('training_set_normal.csv', 'rb')
test_data_file = open('test_set.csv', 'rb')
csv_writer = csv.writer(label_dataset_file)
headers = ['metric', 'label']
csv_writer.writerow(headers)
csv_writer.writerow(['float', 'int'])  # header row 2: field types
csv_writer.writerow([None, 'C'])       # header row 3: flags ('C' = category)
csv_reader = csv.reader(normal_data_file)
csv_reader.next()
label = 0
count = 0
for row in csv_reader:
csv_writer.writerow([row[1], label])
count +=1
csv_reader = csv.reader(med_data_file)
csv_reader.next()
label = 1
count = 0
for row in csv_reader:
csv_writer.writerow([row[1], label])
count +=1
csv_reader = csv.reader(test_data_file)
# Skip the test set's three header rows.
csv_reader.next()
csv_reader.next()
csv_reader.next()
label = 1
count = 0
for row in csv_reader:
csv_writer.writerow([row[0], label])
count +=1
label_dataset_file.close()
|
jolyonb/edx-platform
|
refs/heads/master
|
pavelib/paver_tests/test_paver_get_quality_reports.py
|
25
|
"""
Tests to ensure only the report files we want are returned as part of run_quality.
"""
import unittest
from mock import patch
import pavelib.quality
class TestGetReportFiles(unittest.TestCase):
"""
Ensure only the report files we want are returned as part of run_quality.
"""
@patch('os.walk')
def test_get_pylint_reports(self, my_mock):
my_mock.return_value = iter([
('/foo', (None,), ('pylint.report',)),
('/bar', ('/baz',), ('pylint.report',))
])
reports = pavelib.quality.get_violations_reports("pylint")
self.assertEqual(len(reports), 2)
@patch('os.walk')
def test_get_pep8_reports(self, my_mock):
my_mock.return_value = iter([
('/foo', (None,), ('pep8.report',)),
('/bar', ('/baz',), ('pep8.report',))
])
reports = pavelib.quality.get_violations_reports("pep8")
self.assertEqual(len(reports), 2)
@patch('os.walk')
def test_get_pep8_reports_noisy(self, my_mock):
""" Several conditions: different report types, different files, multiple files """
my_mock.return_value = iter([
('/foo', (None,), ('pep8.report',)),
('/fooz', ('/ball',), ('pylint.report',)),
('/fooz', ('/ball',), ('non.report',)),
('/fooz', ('/ball',), ('lms.xml',)),
('/bar', ('/baz',), ('pep8.report',))
])
reports = pavelib.quality.get_violations_reports("pep8")
self.assertEqual(len(reports), 2)
|
letouriste001/SmartForest_2.0
|
refs/heads/master
|
python3.4Smartforest/lib/python3.4/site-packages/wheel/test/test_basic.py
|
472
|
"""
Basic wheel tests.
"""
import os
import pkg_resources
import json
import sys
from pkg_resources import resource_filename
import wheel.util
import wheel.tool
from wheel import egg2wheel
from wheel.install import WheelFile
from zipfile import ZipFile
from shutil import rmtree
test_distributions = ("complex-dist", "simple.dist", "headers.dist")
def teardown_module():
"""Delete eggs/wheels created by tests."""
base = pkg_resources.resource_filename('wheel.test', '')
for dist in test_distributions:
for subdir in ('build', 'dist'):
try:
rmtree(os.path.join(base, dist, subdir))
except OSError:
pass
def setup_module():
build_wheel()
build_egg()
def build_wheel():
"""Build wheels from test distributions."""
for dist in test_distributions:
pwd = os.path.abspath(os.curdir)
distdir = pkg_resources.resource_filename('wheel.test', dist)
os.chdir(distdir)
try:
sys.argv = ['', 'bdist_wheel']
exec(compile(open('setup.py').read(), 'setup.py', 'exec'))
finally:
os.chdir(pwd)
def build_egg():
"""Build eggs from test distributions."""
for dist in test_distributions:
pwd = os.path.abspath(os.curdir)
distdir = pkg_resources.resource_filename('wheel.test', dist)
os.chdir(distdir)
try:
sys.argv = ['', 'bdist_egg']
exec(compile(open('setup.py').read(), 'setup.py', 'exec'))
finally:
os.chdir(pwd)
def test_findable():
"""Make sure pkg_resources can find us."""
assert pkg_resources.working_set.by_key['wheel'].version
def test_egg_re():
"""Make sure egg_info_re matches."""
egg_names = open(pkg_resources.resource_filename('wheel', 'eggnames.txt'))
for line in egg_names:
line = line.strip()
if not line:
continue
assert egg2wheel.egg_info_re.match(line), line
def test_compatibility_tags():
"""Test compatibilty tags are working."""
wf = WheelFile("package-1.0.0-cp32.cp33-noabi-noarch.whl")
assert (list(wf.compatibility_tags) ==
[('cp32', 'noabi', 'noarch'), ('cp33', 'noabi', 'noarch')])
assert (wf.arity == 2)
wf2 = WheelFile("package-1.0.0-1st-cp33-noabi-noarch.whl")
wf2_info = wf2.parsed_filename.groupdict()
assert wf2_info['build'] == '1st', wf2_info
def test_convert_egg():
base = pkg_resources.resource_filename('wheel.test', '')
for dist in test_distributions:
distdir = os.path.join(base, dist, 'dist')
eggs = [e for e in os.listdir(distdir) if e.endswith('.egg')]
wheel.tool.convert(eggs, distdir, verbose=False)
def test_unpack():
"""
Make sure 'wheel unpack' works.
This also verifies the integrity of our testing wheel files.
"""
for dist in test_distributions:
distdir = pkg_resources.resource_filename('wheel.test',
os.path.join(dist, 'dist'))
for wheelfile in (w for w in os.listdir(distdir) if w.endswith('.whl')):
wheel.tool.unpack(os.path.join(distdir, wheelfile), distdir)
def test_no_scripts():
"""Make sure entry point scripts are not generated."""
dist = "complex-dist"
basedir = pkg_resources.resource_filename('wheel.test', dist)
for (dirname, subdirs, filenames) in os.walk(basedir):
for filename in filenames:
if filename.endswith('.whl'):
whl = ZipFile(os.path.join(dirname, filename))
for entry in whl.infolist():
assert not '.data/scripts/' in entry.filename
def test_pydist():
"""Make sure pydist.json exists and validates against our schema."""
# XXX this test may need manual cleanup of older wheels
import jsonschema
def open_json(filename):
return json.loads(open(filename, 'rb').read().decode('utf-8'))
pymeta_schema = open_json(resource_filename('wheel.test',
'pydist-schema.json'))
valid = 0
for dist in ("simple.dist", "complex-dist"):
basedir = pkg_resources.resource_filename('wheel.test', dist)
for (dirname, subdirs, filenames) in os.walk(basedir):
for filename in filenames:
if filename.endswith('.whl'):
whl = ZipFile(os.path.join(dirname, filename))
for entry in whl.infolist():
if entry.filename.endswith('/metadata.json'):
pymeta = json.loads(whl.read(entry).decode('utf-8'))
jsonschema.validate(pymeta, pymeta_schema)
valid += 1
assert valid > 0, "No metadata.json found"
def test_util():
"""Test functions in util.py."""
for i in range(10):
before = b'*' * i
encoded = wheel.util.urlsafe_b64encode(before)
assert not encoded.endswith(b'=')
after = wheel.util.urlsafe_b64decode(encoded)
assert before == after
def test_pick_best():
"""Test the wheel ranking algorithm."""
def get_tags(res):
info = res[-1].parsed_filename.groupdict()
return info['pyver'], info['abi'], info['plat']
cand_tags = [('py27', 'noabi', 'noarch'), ('py26', 'noabi', 'noarch'),
('cp27', 'noabi', 'linux_i686'),
('cp26', 'noabi', 'linux_i686'),
('cp27', 'noabi', 'linux_x86_64'),
('cp26', 'noabi', 'linux_x86_64')]
cand_wheels = [WheelFile('testpkg-1.0-%s-%s-%s.whl' % t)
for t in cand_tags]
supported = [('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch')]
supported2 = [('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch'),
('cp26', 'noabi', 'linux_i686'), ('py26', 'noabi', 'noarch')]
supported3 = [('cp26', 'noabi', 'linux_i686'), ('py26', 'noabi', 'noarch'),
('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch')]
for supp in (supported, supported2, supported3):
context = lambda: list(supp)
for wheel in cand_wheels:
wheel.context = context
best = max(cand_wheels)
assert list(best.tags)[0] == supp[0]
# assert_equal(
# list(map(get_tags, pick_best(cand_wheels, supp, top=False))), supp)
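# --- Editor's usage sketch (not part of the test suite): WheelFile parses
# compatibility tags straight from the filename, so the file need not exist.
#
#     wf = WheelFile('pkg-1.0-py2.py3-none-any.whl')
#     list(wf.compatibility_tags)  # [('py2', 'none', 'any'), ('py3', 'none', 'any')]
#     wf.arity                     # 2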
|
OpenSTC-Eleger/stc-m14
|
refs/heads/master
|
report/__init__.py
|
15
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import base_report
import bilan_report
import compute_resultant_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
viki9698/jizhanggroup
|
refs/heads/master
|
django/contrib/gis/utils/wkt.py
|
219
|
"""
Utilities for manipulating Geometry WKT.
"""
from django.utils import six
def precision_wkt(geom, prec):
    """
    Returns the WKT of the geometry according to the given precision (an
    integer or a string). If the precision is an integer, the decimal places
    of the coordinates in the WKT will be truncated to that number:
    >>> pnt = Point(5, 23)
    >>> pnt.wkt
    'POINT (5.0000000000000000 23.0000000000000000)'
    >>> precision_wkt(pnt, 1)
    'POINT (5.0 23.0)'
    If the precision is a string, it must be a valid Python format string
    (e.g., '%20.7f') -- thus, you should know what you're doing.
    """
if isinstance(prec, int):
num_fmt = '%%.%df' % prec
elif isinstance(prec, six.string_types):
num_fmt = prec
else:
raise TypeError
# TODO: Support 3D geometries.
coord_fmt = ' '.join([num_fmt, num_fmt])
def formatted_coords(coords):
return ','.join([coord_fmt % c[:2] for c in coords])
def formatted_poly(poly):
return ','.join(['(%s)' % formatted_coords(r) for r in poly])
def formatted_geom(g):
gtype = str(g.geom_type).upper()
yield '%s(' % gtype
if gtype == 'POINT':
yield formatted_coords((g.coords,))
elif gtype in ('LINESTRING', 'LINEARRING'):
yield formatted_coords(g.coords)
elif gtype in ('POLYGON', 'MULTILINESTRING'):
yield formatted_poly(g)
elif gtype == 'MULTIPOINT':
yield formatted_coords(g.coords)
elif gtype == 'MULTIPOLYGON':
yield ','.join(['(%s)' % formatted_poly(p) for p in g])
elif gtype == 'GEOMETRYCOLLECTION':
yield ','.join([''.join([wkt for wkt in formatted_geom(child)]) for child in g])
else:
raise TypeError
yield ')'
return ''.join([wkt for wkt in formatted_geom(geom)])
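# --- Editor's usage sketch (not part of Django): requires the GEOS bindings.
#
#     from django.contrib.gis.geos import Point
#     precision_wkt(Point(5, 23), 1)       # 'POINT (5.0 23.0)'
#     precision_wkt(Point(5, 23), '%.3f')  # 'POINT (5.000 23.000)'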
|
alex/asciinema
|
refs/heads/master
|
asciinema/config.py
|
1
|
import os
import sys
try:
from ConfigParser import RawConfigParser, ParsingError, NoOptionError
except ImportError:
from configparser import RawConfigParser, ParsingError, NoOptionError
import uuid
DEFAULT_CONFIG_FILE_PATH = "~/.asciinema/config"
DEFAULT_API_URL = 'https://asciinema.org'
class Config:
def __init__(self, path=DEFAULT_CONFIG_FILE_PATH, overrides=None):
self.path = os.path.expanduser(path)
self.overrides = overrides if overrides is not None else os.environ
self._parse_config_file()
def _parse_config_file(self):
config = RawConfigParser()
config.add_section('user')
config.add_section('api')
try:
config.read(self.path)
except ParsingError:
print('Config file %s contains syntax errors' % self.path)
sys.exit(2)
self.config = config
@property
def api_url(self):
try:
api_url = self.config.get('api', 'url')
except NoOptionError:
api_url = DEFAULT_API_URL
api_url = self.overrides.get('ASCIINEMA_API_URL', api_url)
return api_url
@property
def api_token(self):
try:
return self._get_api_token()
except NoOptionError:
try:
return self._get_user_token()
except NoOptionError:
return self._create_api_token()
def _ensure_base_dir(self):
dir = os.path.dirname(self.path)
if not os.path.isdir(dir):
os.mkdir(dir)
def _get_api_token(self):
return self.config.get('api', 'token')
def _get_user_token(self):
return self.config.get('user', 'token')
def _create_api_token(self):
api_token = str(uuid.uuid1())
self.config.set('api', 'token', api_token)
self._ensure_base_dir()
with open(self.path, 'w') as f:
self.config.write(f)
return api_token
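# --- Editor's usage sketch (not part of asciinema): the override URL below is
# hypothetical; api_token creates and persists a UUID in ~/.asciinema/config
# on first access.
if __name__ == '__main__':
    cfg = Config(overrides={'ASCIINEMA_API_URL': 'https://example.org'})
    print(cfg.api_url)    # -> https://example.org
    print(cfg.api_token)  # a uuid1 string, written to the config file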
|
shaufi10/odoo
|
refs/heads/8.0
|
addons/account_payment/account_invoice.py
|
382
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tools.translate import _
from openerp.osv import osv
class Invoice(osv.osv):
_inherit = 'account.invoice'
    # Forbid cancelling an invoice if the related move lines have already been
    # used in a payment order. The risk is that importing the payment line
    # in the bank statement would crash, because no matching move would be
    # found for the payment line.
def action_cancel(self, cr, uid, ids, context=None):
payment_line_obj = self.pool.get('payment.line')
for inv in self.browse(cr, uid, ids, context=context):
pl_line_ids = []
if inv.move_id and inv.move_id.line_id:
inv_mv_lines = [x.id for x in inv.move_id.line_id]
pl_line_ids = payment_line_obj.search(cr, uid, [('move_line_id','in',inv_mv_lines)], context=context)
if pl_line_ids:
pay_line = payment_line_obj.browse(cr, uid, pl_line_ids, context=context)
payment_order_name = ','.join(map(lambda x: x.order_id.reference, pay_line))
                raise osv.except_osv(_('Error!'), _("You cannot cancel an invoice which has already been imported in a payment order. Remove it from the following payment order: %s.") % payment_order_name)
return super(Invoice, self).action_cancel(cr, uid, ids, context=context)
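    # --- Editor's sketch (not part of the module): within the old OpenERP API,
    # cancelling invoice id 42 (a made-up id) reaches this override via:
    #
    #     inv_obj = self.pool.get('account.invoice')
    #     inv_obj.action_cancel(cr, uid, [42], context=None)
    #
    # It raises except_osv when any move line of the invoice is referenced by
    # a payment.line, and otherwise defers to the standard cancel behaviour.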
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
perezg/infoxchange
|
refs/heads/master
|
BASE/lib/python2.7/site-packages/dateutil/__init__.py
|
147
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2003-2010 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard Python
datetime module.
"""
__author__ = "Tomi Pieviläinen <tomi.pievilainen@iki.fi>"
__license__ = "Simplified BSD"
__version__ = "2.1"
|
xuweiliang/Codelibrary
|
refs/heads/master
|
hav-gclient-3.2_newton/RDPSettingDialog.py
|
1
|
#!/usr/bin/env python
#coding=utf-8
import wx
import os
import re
import Logger
import commands
import subprocess
from subprocess import *
import threading
import time
import Resource
import Setting
import Util
import pyudev  # needed by CheckHotplugDevice.run(); missing from the original imports
class CheckHotplugDevice(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.onlineList = None
self.usbip = '/usr/sbin/'+'usbip'
def get_vid_and_pid(self, path, id):
vidFile = '/sys'+path+'/'+id
try:
fp = open(vidFile)
except IOError, e:
print e
return None
else:
#print 'success'
pass
id = fp.read().strip("\n")
fp.close()
return id
def is_input_device(self, vid, pid):
command = "lsusb -d %s:%s" % (vid, pid)
bp = os.popen(command, 'r')
content = bp.read().strip("\n")
print content
bp.close()
m1 = re.search('Mouse', content)
m2 = re.search('Keyboard', content)
m3 = re.search('Intel Corp', content)
m4 = re.search(r':\w{4}\s{1}\w+', content)
if m1 or m2 or m3 or (m4 == None):
return True
return False
def bind_device(self, port):
#if Setting.getRdpUsbip().lower() == 'true':
#return
time.sleep(0.1)
command = 'usbip bind -b %s' % port
Logger.info("Add: %s", command)
ret = os.system(command)
if ret == 0:
print 'bind device success: %s' % port
Logger.info("bind device success")
else:
print 'bind device fail: %s' % port
Logger.info("bind device fail")
def mount_device(self, port):
os.system('usbip attach --remote 127.0.0.1 --busid=%s' % port)
time.sleep(0.7)
os.system('usbip port')
os.system('usbip detach --port=0')
def unbind_device(self, port):
if Setting.getRdpUsbip().lower() == 'true':
return
command = '%s unbind --busid=%s' % (self.usbip, port)
print command
Logger.info("Remove: %s", command)
ret = os.system(command)
if ret == 0:
print 'unbind device success'
Logger.info("unbind device success")
else:
print 'unbind device fail'
Logger.info("unbind device fail")
def scan_device(self):
os.system('modprobe usbip-host')
os.system('modprobe vhci-hcd')
os.system('usbipd -D')
bp = os.popen('%s list -l' % self.usbip, 'r')
content = bp.read()
bp.close()
print content
port = re.findall(r' - busid (.*) ', content)
print port
l1 = re.findall(r'\(((?:[a-z]|\d)*):', content)
vid = sorted(set(l1),key=l1.index)
print vid
l2 = re.findall(r'(?:[a-zA-Z]|[0-9])+:(.*?)\)', content)
pid = sorted(set(l2),key=l2.index)
print pid
device = []
for (item1, item2, item3) in zip(vid, pid, port):
if (self.is_input_device(item1, item2)):
continue
vp = '%s:%s' % (item1, item2)
temp = [vp, item3] #[vid:pid, port]
device.append(temp)
return device
    def unshare_all_device(self):
        bp = os.popen('%s list -l' % self.usbip, 'r')
        content = bp.read()
        bp.close()
        port = re.findall(r'Port: (\S+)', content)
        flag = re.findall(r'Status: (.*?)\n', content)
        for (item3, item4) in zip(port, flag):
            if 'shared' in item4:
                # The original called self.unshare_device(), which is not
                # defined anywhere; unbind_device() is the closest match.
                self.unbind_device(item3)
def check_device_is_bind(self, port):
command = 'usbip list -r 127.0.0.1'
fp = os.popen(command, 'r')
content = fp.read()
fp.close()
if port in content:
return True
else:
return False
def run(self):
context = pyudev.Context()
monitor = pyudev.Monitor.from_netlink(context)
for action, device in monitor:
if 'BUSNUM' in device.keys():
if device['ACTION'] == 'add':
port = device['DEVPATH'].split("/")[5]
vid = self.get_vid_and_pid(device['DEVPATH'], 'idVendor')
if vid == None:
Logger.info('open vid file error')
continue
pid = self.get_vid_and_pid(device['DEVPATH'], 'idProduct')
if pid == None:
Logger.info('open pid file error')
continue
if (self.is_input_device(vid, pid)):
continue
self.bind_device(port)
if device['ACTION'] == 'remove':
port = device['DEVPATH'].split("/")[5]
#self.unbind_device(port)
class RDPSettingPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
self.topSizer = wx.BoxSizer(wx.VERTICAL)
self.mainSizer = wx.BoxSizer(wx.VERTICAL)
sizer = wx.BoxSizer(wx.VERTICAL)
# sizer.Add(wx.StaticText(self, -1, u' xxxxxxxxxxx'), 0, wx.EXPAND)
# sizer.AddSpacer(10)
        self.allow_device = wx.CheckBox(self, -1, u"允许映射本地USB设备")  # "Allow mapping local USB devices"
        #self.auto_connect = wx.CheckBox(self, -1, u"断开连接后重新连接")  # "Reconnect after disconnecting"
        #self.headset_micro = wx.CheckBox(self, -1, u"允许映射本地耳机、麦克风设备")  # "Allow mapping local headset/microphone devices"
        self.remotefx = wx.CheckBox(self, -1, u"启用RemoteFX显示特性")  # "Enable the RemoteFX display feature"
sizer.Add(self.allow_device, 0, wx.EXPAND)
sizer.AddSpacer(5)
#sizer.Add(self.auto_connect, 0, wx.EXPAND)
#sizer.AddSpacer(5)
#sizer.Add(self.headset_micro, 0, wx.EXPAND)
#sizer.AddSpacer(5)
sizer.Add(self.remotefx, 0, wx.EXPAND)
sizer.AddSpacer(20)
if Setting.getAllow_device().lower() == 'true':
self.allow_device.SetValue(True)
else:
self.allow_device.SetValue(False)
#if Setting.getAuto_connect().lower() == 'true':
# self.auto_connect.SetValue(True)
#else:
# self.auto_connect.SetValue(False)
#if Setting.getHeadset_micro().lower() == 'true':
# self.headset_micro.SetValue(True)
#else:
# self.headset_micro.SetValue(False)
if Setting.getRemotefx().lower() == 'true':
self.remotefx.SetValue(True)
else:
self.remotefx.SetValue(False)
self.mainSizer.Add(sizer, 0, wx.EXPAND)
        label = wx.StaticText(self, -1, u'RDP设置:')  # "RDP settings:"
        self.mainSizer.Add(label, 0, flag = wx.EXPAND | wx.ALL)
        self.autoCheckBox = wx.CheckBox(self, -1, u'自动(将自动重定向除鼠标键盘之外的设备)')  # "Automatic (auto-redirect all devices except mouse and keyboard)"
        self.Bind(wx.EVT_CHECKBOX, self.OnAutoCheckBox, self.autoCheckBox)
        self.mainSizer.Add(self.autoCheckBox, 0, flag = wx.EXPAND)
        self.mainSizer.AddSpacer(3)
        label = wx.StaticText(self, -1, u' 点击检测按钮,进行设备检测,手动勾选进行重定向')  # "Click the detect button to scan devices, then tick manually to redirect"
        self.mainSizer.Add(label, 0, flag = wx.EXPAND)
        self.refresh = wx.Button(self, -1, u'刷新')  # "Refresh"
self.mainSizer.Add(self.refresh, 0)
self.midSizer = wx.BoxSizer(wx.VERTICAL)
self.topSizer.Add(self.mainSizer, 0, wx.EXPAND)
self.topSizer.Add(self.midSizer, 0, wx.EXPAND)
self.Bind(wx.EVT_BUTTON, self.OnRefresh, self.refresh)
if Setting.getRdpUsbip().lower() == 'true':
#auto
self.refresh.Enable(False)
self.autoCheckBox.SetValue(True)
else:
#sign
self.refresh.Enable(True)
self.autoCheckBox.SetValue(False)
self.usb_list = []
self.SetSizerAndFit(self.topSizer)
#self.SetSizer(Util.CreateCenterSizer(sizer, 10))
def OnAutoCheckBox(self, event):
if self.autoCheckBox.GetValue() == True:
#auto
self.refresh.Enable(False)
for device in self.usb_list:
device.Enable(False)
else:
#sign
self.refresh.Enable(True)
for device in self.usb_list:
device.Enable(True)
def OnRefresh(self, event):
self.midSizer = wx.BoxSizer(wx.VERTICAL)
self.usbipdialog = CheckHotplugDevice()
self.device = self.usbipdialog.scan_device()
for usb in self.usb_list:
usb.Destroy()
self.usb_list = []
value = 0
for i in self.device:
iManufacturer, iProduct = self.get_manufacturer_and_product(i[1])
usb = wx.CheckBox(self, value, iManufacturer + ' ' + iProduct)
self.Bind(wx.EVT_CHECKBOX, self.OnCheckBox, usb)
self.usb_list.append(usb)
self.midSizer.Add(usb, 3, wx.EXPAND)
value = value + 1
if self.usbipdialog.check_device_is_bind(i[1]):
usb.SetValue(True)
else:
usb.SetValue(False)
self.topSizer.Add(self.midSizer, 0, wx.EXPAND)
self.SetSizerAndFit(self.topSizer)
def OnCheckBox(self, event):
id = event.GetId()
#print self.usb_list[id].GetValue()
#print self.device[id]
def get_manufacturer_and_product(self, port):
path = '/sys/bus/usb/devices/' + port
manufacturerPath = path + '/manufacturer'
if os.path.exists(manufacturerPath):
manufacturer = self.open_and_read_file(manufacturerPath)
else:
manufacturer = 'None'
productPath = path + '/product'
product = self.open_and_read_file(productPath)
return manufacturer, product
def open_and_read_file(self, name):
fp = open(name)
content = fp.read().strip('\n')
fp.close()
return content
def OnSave(self):
Setting.setAllow_device('%s' % self.allow_device.GetValue())
#Setting.setAuto_connect('%s' % self.auto_connect.GetValue())
#Setting.setHeadset_micro('%s' % self.headset_micro.GetValue())
Setting.setRemotefx('%s' % self.remotefx.GetValue())
Setting.setRdpUsbip('%s' % self.autoCheckBox.GetValue())
Setting.save()
if self.autoCheckBox.GetValue() == True:
self.OnRefresh(-1)
for device in self.usb_list:
self.usbipdialog.bind_device(self.device[device.GetId()][1])
self.usbipdialog.mount_device(self.device[device.GetId()][1])
else:
for device in self.usb_list:
if True == device.GetValue():
print self.device[device.GetId()]
#if self.usbipdialog.check_device_is_bind(self.device[device.GetId()][1]) == False:
self.usbipdialog.bind_device(self.device[device.GetId()][1])
self.usbipdialog.mount_device(self.device[device.GetId()][1])
else:
print self.device[device.GetId()]
if self.usbipdialog.check_device_is_bind(self.device[device.GetId()][1]) == True:
self.usbipdialog.unbind_device(self.device[device.GetId()][1])
if __name__ == '__main__':
thread = CheckHotplugDevice()
thread.start()
app = wx.PySimpleApp()
frame = wx.Frame(None)
ds = RDPSettingPanel(frame)
frame.Show()
app.MainLoop()
|
jmuhlich/bayessb
|
refs/heads/master
|
examples/pymc/lognormal_test.py
|
1
|
"""A file to make sure we are setting the mean and variance for PyMC
Lognormal variables correctly--since we are used to describing them with
the mean and variance in log base 10."""
from pymc import deterministic, stochastic, MvNormal, Normal, Lognormal, Uniform
from pymc import MCMC, Model
import numpy as np
from pylab import *
# PyMC's Lognormal takes mu and tau in natural-log (base-e) units. To convert
# a mean and a variance expressed in base-10 logs, use ln(x) = ln(10) * log10(x):
#     mu  = mean_b10 * ln(10)         = ln(10 ** mean_b10)
#     tau = 1 / (var_b10 * ln(10)**2) = 1 / (ln(10) * ln(10 ** var_b10))
# (This example has a mean around 1e2, with a variance of 9 logs in base 10.)
mean_b10 = 2
var_b10 = 9
print "Setting mean (base 10) to %f, variance (base 10) to %f" % (mean_b10, var_b10)
# The lognormal variable
k = Lognormal('k', mu=np.log(10 ** mean_b10),
tau=1./(np.log(10) * np.log(10 ** var_b10)))
# Sample it
m = MCMC(Model([k]))
m.sample(iter=50000)
ion()
# Plot the distribution in base e
figure()
y = log(m.trace('k')[:])
y10 = log10(m.trace('k')[:])
hist(y, bins=100)
print
print "Mean, base e: %f; Variance, base e: %f" % (mean(y), var(y))
# Plot the distribution in base 10
figure()
hist(y10, bins=100)
print "Mean, base 10: %f; Variance, base 10: %f" % (mean(y10), var(y10))
|
McNetic/couchpotato-ger
|
refs/heads/master
|
library/hachoir_parser/image/wmf.py
|
86
|
"""
Hachoir parser of Microsoft Windows Metafile (WMF) file format.
Documentation:
- Microsoft Windows Metafile; also known as: WMF,
Enhanced Metafile, EMF, APM
http://wvware.sourceforge.net/caolan/ora-wmf.html
- libwmf source code:
- include/libwmf/defs.h: enums
- src/player/meta.h: arguments parsers
- libemf source code
Author: Victor Stinner
Creation date: 26 december 2006
"""
MAX_FILESIZE = 50 * 1024 * 1024
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, StaticFieldSet, Enum,
MissingField, ParserError,
UInt32, Int32, UInt16, Int16, UInt8, NullBytes, RawBytes, String)
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_core.text_handler import textHandler, hexadecimal
from hachoir_core.tools import createDict
from hachoir_parser.image.common import RGBA
POLYFILL_MODE = {1: "Alternate", 2: "Winding"}
BRUSH_STYLE = {
0: u"Solid",
1: u"Null",
2: u"Hollow",
3: u"Pattern",
4: u"Indexed",
5: u"DIB pattern",
6: u"DIB pattern point",
7: u"Pattern 8x8",
8: u"DIB pattern 8x8",
}
HATCH_STYLE = {
0: u"Horizontal", # -----
1: u"Vertical", # |||||
2: u"FDIAGONAL", # \\\\\
3: u"BDIAGONAL", # /////
4: u"Cross", # +++++
5: u"Diagonal cross", # xxxxx
}
PEN_STYLE = {
0: u"Solid",
1: u"Dash", # -------
2: u"Dot", # .......
3: u"Dash dot", # _._._._
4: u"Dash dot dot", # _.._.._
5: u"Null",
6: u"Inside frame",
7: u"User style",
8: u"Alternate",
}
# Binary raster operations
ROP2_DESC = {
1: u"Black (0)",
2: u"Not merge pen (DPon)",
3: u"Mask not pen (DPna)",
4: u"Not copy pen (PN)",
5: u"Mask pen not (PDna)",
6: u"Not (Dn)",
7: u"Xor pen (DPx)",
8: u"Not mask pen (DPan)",
9: u"Mask pen (DPa)",
10: u"Not xor pen (DPxn)",
11: u"No operation (D)",
12: u"Merge not pen (DPno)",
13: u"Copy pen (P)",
14: u"Merge pen not (PDno)",
15: u"Merge pen (DPo)",
16: u"White (1)",
}
def parseXY(parser):
yield Int16(parser, "x")
yield Int16(parser, "y")
def parseCreateBrushIndirect(parser):
yield Enum(UInt16(parser, "brush_style"), BRUSH_STYLE)
yield RGBA(parser, "color")
yield Enum(UInt16(parser, "brush_hatch"), HATCH_STYLE)
def parsePenIndirect(parser):
yield Enum(UInt16(parser, "pen_style"), PEN_STYLE)
yield UInt16(parser, "pen_width")
yield UInt16(parser, "pen_height")
yield RGBA(parser, "color")
def parsePolyFillMode(parser):
yield Enum(UInt16(parser, "operation"), POLYFILL_MODE)
def parseROP2(parser):
yield Enum(UInt16(parser, "operation"), ROP2_DESC)
def parseObjectID(parser):
yield UInt16(parser, "object_id")
class Point(FieldSet):
static_size = 32
def createFields(self):
yield Int16(self, "x")
yield Int16(self, "y")
def createDescription(self):
return "Point (%s, %s)" % (self["x"].value, self["y"].value)
def parsePolygon(parser):
yield UInt16(parser, "count")
for index in xrange(parser["count"].value):
yield Point(parser, "point[]")
META = {
0x0000: ("EOF", u"End of file", None),
0x001E: ("SAVEDC", u"Save device context", None),
0x0035: ("REALIZEPALETTE", u"Realize palette", None),
0x0037: ("SETPALENTRIES", u"Set palette entries", None),
0x00f7: ("CREATEPALETTE", u"Create palette", None),
0x0102: ("SETBKMODE", u"Set background mode", None),
0x0103: ("SETMAPMODE", u"Set mapping mode", None),
0x0104: ("SETROP2", u"Set foreground mix mode", parseROP2),
0x0106: ("SETPOLYFILLMODE", u"Set polygon fill mode", parsePolyFillMode),
0x0107: ("SETSTRETCHBLTMODE", u"Set bitmap streching mode", None),
0x0108: ("SETTEXTCHAREXTRA", u"Set text character extra", None),
0x0127: ("RESTOREDC", u"Restore device context", None),
0x012A: ("INVERTREGION", u"Invert region", None),
0x012B: ("PAINTREGION", u"Paint region", None),
0x012C: ("SELECTCLIPREGION", u"Select clipping region", None),
0x012D: ("SELECTOBJECT", u"Select object", parseObjectID),
0x012E: ("SETTEXTALIGN", u"Set text alignment", None),
0x0142: ("CREATEDIBPATTERNBRUSH", u"Create DIB brush with specified pattern", None),
0x01f0: ("DELETEOBJECT", u"Delete object", parseObjectID),
0x0201: ("SETBKCOLOR", u"Set background color", None),
0x0209: ("SETTEXTCOLOR", u"Set text color", None),
0x020A: ("SETTEXTJUSTIFICATION", u"Set text justification", None),
0x020B: ("SETWINDOWORG", u"Set window origin", parseXY),
0x020C: ("SETWINDOWEXT", u"Set window extends", parseXY),
0x020D: ("SETVIEWPORTORG", u"Set view port origin", None),
0x020E: ("SETVIEWPORTEXT", u"Set view port extends", None),
0x020F: ("OFFSETWINDOWORG", u"Offset window origin", None),
0x0211: ("OFFSETVIEWPORTORG", u"Offset view port origin", None),
0x0213: ("LINETO", u"Draw a line to", None),
0x0214: ("MOVETO", u"Move to", None),
0x0220: ("OFFSETCLIPRGN", u"Offset clipping rectangle", None),
0x0228: ("FILLREGION", u"Fill region", None),
0x0231: ("SETMAPPERFLAGS", u"Set mapper flags", None),
0x0234: ("SELECTPALETTE", u"Select palette", None),
0x02FB: ("CREATEFONTINDIRECT", u"Create font indirect", None),
0x02FA: ("CREATEPENINDIRECT", u"Create pen indirect", parsePenIndirect),
0x02FC: ("CREATEBRUSHINDIRECT", u"Create brush indirect", parseCreateBrushIndirect),
0x0324: ("POLYGON", u"Draw a polygon", parsePolygon),
0x0325: ("POLYLINE", u"Draw a polyline", None),
0x0410: ("SCALEWINDOWEXT", u"Scale window extends", None),
0x0412: ("SCALEVIEWPORTEXT", u"Scale view port extends", None),
0x0415: ("EXCLUDECLIPRECT", u"Exclude clipping rectangle", None),
0x0416: ("INTERSECTCLIPRECT", u"Intersect clipping rectangle", None),
0x0418: ("ELLIPSE", u"Draw an ellipse", None),
0x0419: ("FLOODFILL", u"Flood fill", None),
0x041B: ("RECTANGLE", u"Draw a rectangle", None),
0x041F: ("SETPIXEL", u"Set pixel", None),
0x0429: ("FRAMEREGION", u"Fram region", None),
0x0521: ("TEXTOUT", u"Draw text", None),
0x0538: ("POLYPOLYGON", u"Draw multiple polygons", None),
0x0548: ("EXTFLOODFILL", u"Extend flood fill", None),
0x061C: ("ROUNDRECT", u"Draw a rounded rectangle", None),
0x061D: ("PATBLT", u"Pattern blitting", None),
0x0626: ("ESCAPE", u"Escape", None),
0x06FF: ("CREATEREGION", u"Create region", None),
0x0817: ("ARC", u"Draw an arc", None),
0x081A: ("PIE", u"Draw a pie", None),
0x0830: ("CHORD", u"Draw a chord", None),
0x0940: ("DIBBITBLT", u"DIB bit blitting", None),
0x0a32: ("EXTTEXTOUT", u"Draw text (extra)", None),
0x0b41: ("DIBSTRETCHBLT", u"DIB stretch blitting", None),
0x0d33: ("SETDIBTODEV", u"Set DIB to device", None),
0x0f43: ("STRETCHDIB", u"Stretch DIB", None),
}
META_NAME = createDict(META, 0)
META_DESC = createDict(META, 1)
#----------------------------------------------------------------------------
# EMF constants
# EMF mapping modes
EMF_MAPPING_MODE = {
1: "TEXT",
2: "LOMETRIC",
3: "HIMETRIC",
4: "LOENGLISH",
5: "HIENGLISH",
6: "TWIPS",
7: "ISOTROPIC",
8: "ANISOTROPIC",
}
#----------------------------------------------------------------------------
# EMF parser
def parseEmfMappingMode(parser):
yield Enum(Int32(parser, "mapping_mode"), EMF_MAPPING_MODE)
def parseXY32(parser):
yield Int32(parser, "x")
yield Int32(parser, "y")
def parseObjectID32(parser):
yield textHandler(UInt32(parser, "object_id"), hexadecimal)
def parseBrushIndirect(parser):
yield UInt32(parser, "ihBrush")
yield UInt32(parser, "style")
yield RGBA(parser, "color")
yield Int32(parser, "hatch")
class Point16(FieldSet):
static_size = 32
def createFields(self):
yield Int16(self, "x")
yield Int16(self, "y")
def createDescription(self):
return "Point16: (%i,%i)" % (self["x"].value, self["y"].value)
def parsePoint16array(parser):
yield RECT32(parser, "bounds")
yield UInt32(parser, "count")
for index in xrange(parser["count"].value):
yield Point16(parser, "point[]")
def parseGDIComment(parser):
yield UInt32(parser, "data_size")
size = parser["data_size"].value
if size:
yield RawBytes(parser, "data", size)
def parseICMMode(parser):
yield UInt32(parser, "icm_mode")
def parseExtCreatePen(parser):
yield UInt32(parser, "ihPen")
yield UInt32(parser, "offBmi")
yield UInt32(parser, "cbBmi")
yield UInt32(parser, "offBits")
yield UInt32(parser, "cbBits")
yield UInt32(parser, "pen_style")
yield UInt32(parser, "width")
yield UInt32(parser, "brush_style")
yield RGBA(parser, "color")
yield UInt32(parser, "hatch")
yield UInt32(parser, "nb_style")
for index in xrange(parser["nb_style"].value):
yield UInt32(parser, "style")
EMF_META = {
1: ("HEADER", u"Header", None),
2: ("POLYBEZIER", u"Draw poly bezier", None),
3: ("POLYGON", u"Draw polygon", None),
4: ("POLYLINE", u"Draw polyline", None),
5: ("POLYBEZIERTO", u"Draw poly bezier to", None),
6: ("POLYLINETO", u"Draw poly line to", None),
7: ("POLYPOLYLINE", u"Draw poly polyline", None),
8: ("POLYPOLYGON", u"Draw poly polygon", None),
9: ("SETWINDOWEXTEX", u"Set window extend EX", parseXY32),
10: ("SETWINDOWORGEX", u"Set window origin EX", parseXY32),
11: ("SETVIEWPORTEXTEX", u"Set viewport extend EX", parseXY32),
12: ("SETVIEWPORTORGEX", u"Set viewport origin EX", parseXY32),
13: ("SETBRUSHORGEX", u"Set brush org EX", None),
14: ("EOF", u"End of file", None),
15: ("SETPIXELV", u"Set pixel V", None),
16: ("SETMAPPERFLAGS", u"Set mapper flags", None),
17: ("SETMAPMODE", u"Set mapping mode", parseEmfMappingMode),
18: ("SETBKMODE", u"Set background mode", None),
19: ("SETPOLYFILLMODE", u"Set polyfill mode", None),
20: ("SETROP2", u"Set ROP2", None),
21: ("SETSTRETCHBLTMODE", u"Set stretching blitting mode", None),
22: ("SETTEXTALIGN", u"Set text align", None),
23: ("SETCOLORADJUSTMENT", u"Set color adjustment", None),
24: ("SETTEXTCOLOR", u"Set text color", None),
25: ("SETBKCOLOR", u"Set background color", None),
26: ("OFFSETCLIPRGN", u"Offset clipping region", None),
27: ("MOVETOEX", u"Move to EX", parseXY32),
28: ("SETMETARGN", u"Set meta region", None),
29: ("EXCLUDECLIPRECT", u"Exclude clipping rectangle", None),
30: ("INTERSECTCLIPRECT", u"Intersect clipping rectangle", None),
31: ("SCALEVIEWPORTEXTEX", u"Scale viewport extend EX", None),
32: ("SCALEWINDOWEXTEX", u"Scale window extend EX", None),
33: ("SAVEDC", u"Save device context", None),
34: ("RESTOREDC", u"Restore device context", None),
35: ("SETWORLDTRANSFORM", u"Set world transform", None),
36: ("MODIFYWORLDTRANSFORM", u"Modify world transform", None),
37: ("SELECTOBJECT", u"Select object", parseObjectID32),
38: ("CREATEPEN", u"Create pen", None),
39: ("CREATEBRUSHINDIRECT", u"Create brush indirect", parseBrushIndirect),
40: ("DELETEOBJECT", u"Delete object", parseObjectID32),
41: ("ANGLEARC", u"Draw angle arc", None),
42: ("ELLIPSE", u"Draw ellipse", None),
43: ("RECTANGLE", u"Draw rectangle", None),
44: ("ROUNDRECT", u"Draw rounded rectangle", None),
45: ("ARC", u"Draw arc", None),
46: ("CHORD", u"Draw chord", None),
47: ("PIE", u"Draw pie", None),
48: ("SELECTPALETTE", u"Select palette", None),
49: ("CREATEPALETTE", u"Create palette", None),
50: ("SETPALETTEENTRIES", u"Set palette entries", None),
51: ("RESIZEPALETTE", u"Resize palette", None),
52: ("REALIZEPALETTE", u"Realize palette", None),
53: ("EXTFLOODFILL", u"EXT flood fill", None),
54: ("LINETO", u"Draw line to", parseXY32),
55: ("ARCTO", u"Draw arc to", None),
56: ("POLYDRAW", u"Draw poly draw", None),
57: ("SETARCDIRECTION", u"Set arc direction", None),
58: ("SETMITERLIMIT", u"Set miter limit", None),
59: ("BEGINPATH", u"Begin path", None),
60: ("ENDPATH", u"End path", None),
61: ("CLOSEFIGURE", u"Close figure", None),
62: ("FILLPATH", u"Fill path", None),
63: ("STROKEANDFILLPATH", u"Stroke and fill path", None),
64: ("STROKEPATH", u"Stroke path", None),
65: ("FLATTENPATH", u"Flatten path", None),
66: ("WIDENPATH", u"Widen path", None),
67: ("SELECTCLIPPATH", u"Select clipping path", None),
68: ("ABORTPATH", u"Arbort path", None),
70: ("GDICOMMENT", u"GDI comment", parseGDIComment),
71: ("FILLRGN", u"Fill region", None),
72: ("FRAMERGN", u"Frame region", None),
73: ("INVERTRGN", u"Invert region", None),
74: ("PAINTRGN", u"Paint region", None),
75: ("EXTSELECTCLIPRGN", u"EXT select clipping region", None),
76: ("BITBLT", u"Bit blitting", None),
77: ("STRETCHBLT", u"Stretch blitting", None),
78: ("MASKBLT", u"Mask blitting", None),
79: ("PLGBLT", u"PLG blitting", None),
80: ("SETDIBITSTODEVICE", u"Set DIB bits to device", None),
81: ("STRETCHDIBITS", u"Stretch DIB bits", None),
82: ("EXTCREATEFONTINDIRECTW", u"EXT create font indirect W", None),
83: ("EXTTEXTOUTA", u"EXT text out A", None),
84: ("EXTTEXTOUTW", u"EXT text out W", None),
85: ("POLYBEZIER16", u"Draw poly bezier (16-bit)", None),
86: ("POLYGON16", u"Draw polygon (16-bit)", parsePoint16array),
87: ("POLYLINE16", u"Draw polyline (16-bit)", parsePoint16array),
88: ("POLYBEZIERTO16", u"Draw poly bezier to (16-bit)", parsePoint16array),
89: ("POLYLINETO16", u"Draw polyline to (16-bit)", parsePoint16array),
90: ("POLYPOLYLINE16", u"Draw poly polyline (16-bit)", None),
91: ("POLYPOLYGON16", u"Draw poly polygon (16-bit)", parsePoint16array),
92: ("POLYDRAW16", u"Draw poly draw (16-bit)", None),
93: ("CREATEMONOBRUSH", u"Create monobrush", None),
94: ("CREATEDIBPATTERNBRUSHPT", u"Create DIB pattern brush PT", None),
95: ("EXTCREATEPEN", u"EXT create pen", parseExtCreatePen),
96: ("POLYTEXTOUTA", u"Poly text out A", None),
97: ("POLYTEXTOUTW", u"Poly text out W", None),
98: ("SETICMMODE", u"Set ICM mode", parseICMMode),
99: ("CREATECOLORSPACE", u"Create color space", None),
100: ("SETCOLORSPACE", u"Set color space", None),
101: ("DELETECOLORSPACE", u"Delete color space", None),
102: ("GLSRECORD", u"GLS record", None),
103: ("GLSBOUNDEDRECORD", u"GLS bound ED record", None),
104: ("PIXELFORMAT", u"Pixel format", None),
}
EMF_META_NAME = createDict(EMF_META, 0)
EMF_META_DESC = createDict(EMF_META, 1)
class Function(FieldSet):
def __init__(self, *args):
FieldSet.__init__(self, *args)
if self.root.isEMF():
self._size = self["size"].value * 8
else:
self._size = self["size"].value * 16
def createFields(self):
if self.root.isEMF():
yield Enum(UInt32(self, "function"), EMF_META_NAME)
yield UInt32(self, "size")
try:
parser = EMF_META[self["function"].value][2]
except KeyError:
parser = None
else:
yield UInt32(self, "size")
yield Enum(UInt16(self, "function"), META_NAME)
try:
parser = META[self["function"].value][2]
except KeyError:
parser = None
if parser:
for field in parser(self):
yield field
else:
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "data", size)
def isValid(self):
func = self["function"]
return func.value in func.getEnum()
def createDescription(self):
if self.root.isEMF():
return EMF_META_DESC[self["function"].value]
try:
return META_DESC[self["function"].value]
except KeyError:
return "Function %s" % self["function"].display
class RECT16(StaticFieldSet):
format = (
(Int16, "left"),
(Int16, "top"),
(Int16, "right"),
(Int16, "bottom"),
)
def createDescription(self):
return "%s: %ux%u at (%u,%u)" % (
self.__class__.__name__,
self["right"].value-self["left"].value,
self["bottom"].value-self["top"].value,
self["left"].value,
self["top"].value)
class RECT32(RECT16):
format = (
(Int32, "left"),
(Int32, "top"),
(Int32, "right"),
(Int32, "bottom"),
)
class PlaceableHeader(FieldSet):
"""
Header of Placeable Metafile (file extension .APM),
created by Aldus Corporation
"""
MAGIC = "\xD7\xCD\xC6\x9A\0\0" # (magic, handle=0x0000)
def createFields(self):
yield textHandler(UInt32(self, "signature", "Placeable Metafiles signature (0x9AC6CDD7)"), hexadecimal)
yield UInt16(self, "handle")
yield RECT16(self, "rect")
yield UInt16(self, "inch")
yield NullBytes(self, "reserved", 4)
yield textHandler(UInt16(self, "checksum"), hexadecimal)
class EMF_Header(FieldSet):
MAGIC = "\x20\x45\x4D\x46\0\0" # (magic, min_ver=0x0000)
def __init__(self, *args):
FieldSet.__init__(self, *args)
self._size = self["size"].value * 8
def createFields(self):
LONG = Int32
yield UInt32(self, "type", "Record type (always 1)")
yield UInt32(self, "size", "Size of the header in bytes")
yield RECT32(self, "Bounds", "Inclusive bounds")
yield RECT32(self, "Frame", "Inclusive picture frame")
yield textHandler(UInt32(self, "signature", "Signature ID (always 0x464D4520)"), hexadecimal)
yield UInt16(self, "min_ver", "Minor version")
yield UInt16(self, "maj_ver", "Major version")
yield UInt32(self, "file_size", "Size of the file in bytes")
yield UInt32(self, "NumOfRecords", "Number of records in the metafile")
yield UInt16(self, "NumOfHandles", "Number of handles in the handle table")
yield NullBytes(self, "reserved", 2)
yield UInt32(self, "desc_size", "Size of description in 16-bit words")
yield UInt32(self, "desc_ofst", "Offset of description string in metafile")
yield UInt32(self, "nb_colors", "Number of color palette entries")
yield LONG(self, "width_px", "Width of reference device in pixels")
yield LONG(self, "height_px", "Height of reference device in pixels")
yield LONG(self, "width_mm", "Width of reference device in millimeters")
yield LONG(self, "height_mm", "Height of reference device in millimeters")
# Read description (if any)
offset = self["desc_ofst"].value
current = (self.absolute_address + self.current_size) // 8
size = self["desc_size"].value * 2
if offset == current and size:
yield String(self, "description", size, charset="UTF-16-LE", strip="\0 ")
# Read padding (if any)
size = self["size"].value - self.current_size//8
if size:
yield RawBytes(self, "padding", size)
class WMF_File(Parser):
PARSER_TAGS = {
"id": "wmf",
"category": "image",
"file_ext": ("wmf", "apm", "emf"),
"mime": (
u"image/wmf", u"image/x-wmf", u"image/x-win-metafile",
u"application/x-msmetafile", u"application/wmf", u"application/x-wmf",
u"image/x-emf"),
"magic": (
(PlaceableHeader.MAGIC, 0),
(EMF_Header.MAGIC, 40*8),
# WMF: file_type=memory, header size=9, version=3.0
("\0\0\x09\0\0\3", 0),
# WMF: file_type=disk, header size=9, version=3.0
("\1\0\x09\0\0\3", 0),
),
"min_size": 40*8,
"description": u"Microsoft Windows Metafile (WMF)",
}
endian = LITTLE_ENDIAN
FILE_TYPE = {0: "memory", 1: "disk"}
def validate(self):
if self.isEMF():
# Check EMF header
emf = self["emf_header"]
if emf["signature"].value != 0x464D4520:
return "Invalid signature"
if emf["type"].value != 1:
return "Invalid record type"
if emf["reserved"].value != "\0\0":
return "Invalid reserved"
else:
# Check AMF header
if self.isAPM():
amf = self["amf_header"]
if amf["handle"].value != 0:
return "Invalid handle"
if amf["reserved"].value != "\0\0\0\0":
return "Invalid reserved"
# Check common header
if self["file_type"].value not in (0, 1):
return "Invalid file type"
if self["header_size"].value != 9:
return "Invalid header size"
if self["nb_params"].value != 0:
return "Invalid number of parameters"
# Check first functions
for index in xrange(5):
try:
func = self["func[%u]" % index]
except MissingField:
if self.done:
return True
return "Unable to get function #%u" % index
except ParserError:
return "Unable to create function #%u" % index
# Check first frame values
if not func.isValid():
return "Function #%u is invalid" % index
return True
def createFields(self):
if self.isEMF():
yield EMF_Header(self, "emf_header")
else:
if self.isAPM():
yield PlaceableHeader(self, "amf_header")
yield Enum(UInt16(self, "file_type"), self.FILE_TYPE)
yield UInt16(self, "header_size", "Size of header in 16-bit words (always 9)")
yield UInt8(self, "win_ver_min", "Minor version of Microsoft Windows")
yield UInt8(self, "win_ver_maj", "Major version of Microsoft Windows")
yield UInt32(self, "file_size", "Total size of the metafile in 16-bit words")
yield UInt16(self, "nb_obj", "Number of objects in the file")
yield UInt32(self, "max_record_size", "The size of largest record in 16-bit words")
yield UInt16(self, "nb_params", "Not Used (always 0)")
while not(self.eof):
yield Function(self, "func[]")
def isEMF(self):
"""File is in EMF format?"""
if 1 <= self.current_length:
return self[0].name == "emf_header"
if self.size < 44*8:
return False
magic = EMF_Header.MAGIC
return self.stream.readBytes(40*8, len(magic)) == magic
def isAPM(self):
"""File is in Aldus Placeable Metafiles format?"""
if 1 <= self.current_length:
return self[0].name == "amf_header"
else:
magic = PlaceableHeader.MAGIC
return (self.stream.readBytes(0, len(magic)) == magic)
def createDescription(self):
if self.isEMF():
return u"Microsoft Enhanced Metafile (EMF) picture"
elif self.isAPM():
return u"Aldus Placeable Metafile (APM) picture"
else:
return u"Microsoft Windows Metafile (WMF) picture"
def createMimeType(self):
if self.isEMF():
return u"image/x-emf"
else:
return u"image/wmf"
def createContentSize(self):
if self.isEMF():
return None
start = self["func[0]"].absolute_address
end = self.stream.searchBytes("\3\0\0\0\0\0", start, MAX_FILESIZE * 8)
if end is not None:
return end + 6*8
return None
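# --- Illustrative sketch (not part of the original parser) ------------------
# A standalone sniffer showing how the magic values declared above in
# WMF_File.PARSER_TAGS map onto raw bytes.  Assumptions are flagged inline:
# PlaceableHeader.MAGIC is taken to be the well-known 0x9AC6CDD7 Aldus key
# (the constant itself is defined earlier in this module), and
# EMF_Header.MAGIC is " EMF" (0x464D4520, little-endian) at byte offset 40.
def _sniff_metafile(path):
    stream = open(path, "rb")
    try:
        head = stream.read(44)
    finally:
        stream.close()
    if head.startswith("\xd7\xcd\xc6\x9a"):               # assumed APM key
        return "apm"
    if len(head) >= 44 and head[40:44] == " EMF":         # EMF signature at offset 40
        return "emf"
    if head[:6] in ("\0\0\x09\0\0\3", "\1\0\x09\0\0\3"):  # memory/disk WMF header
        return "wmf"
    return None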
|
anksp21/Community-Zenpacks
|
refs/heads/master
|
ZenPacks.ZenSystems.Juniper/ZenPacks/ZenSystems/Juniper/migrate/__init__.py
|
1165
|
# __init__.py
|
eusi/MissionPlanerHM
|
refs/heads/master
|
Lib/distutils/command/register.py
|
75
|
"""distutils.command.register
Implements the Distutils 'register' command (register with the repository).
"""
# created 2002/10/21, Richard Jones
__revision__ = "$Id$"
import urllib2
import getpass
import urlparse
import StringIO
from warnings import warn
from distutils.core import PyPIRCCommand
from distutils import log
class register(PyPIRCCommand):
description = ("register the distribution with the Python package index")
user_options = PyPIRCCommand.user_options + [
('list-classifiers', None,
'list the valid Trove classifiers'),
('strict', None ,
'Will stop the registering if the meta-data are not fully compliant')
]
boolean_options = PyPIRCCommand.boolean_options + [
'verify', 'list-classifiers', 'strict']
sub_commands = [('check', lambda self: True)]
def initialize_options(self):
PyPIRCCommand.initialize_options(self)
self.list_classifiers = 0
self.strict = 0
def finalize_options(self):
PyPIRCCommand.finalize_options(self)
# setting options for the `check` subcommand
check_options = {'strict': ('register', self.strict),
'restructuredtext': ('register', 1)}
self.distribution.command_options['check'] = check_options
def run(self):
self.finalize_options()
self._set_config()
# Run sub commands
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
if self.dry_run:
self.verify_metadata()
elif self.list_classifiers:
self.classifiers()
else:
self.send_metadata()
def check_metadata(self):
"""Deprecated API."""
warn("distutils.command.register.check_metadata is deprecated, \
use the check command instead", PendingDeprecationWarning)
check = self.distribution.get_command_obj('check')
check.ensure_finalized()
check.strict = self.strict
check.restructuredtext = 1
check.run()
def _set_config(self):
''' Reads the configuration file and set attributes.
'''
config = self._read_pypirc()
if config != {}:
self.username = config['username']
self.password = config['password']
self.repository = config['repository']
self.realm = config['realm']
self.has_config = True
else:
if self.repository not in ('pypi', self.DEFAULT_REPOSITORY):
raise ValueError('%s not found in .pypirc' % self.repository)
if self.repository == 'pypi':
self.repository = self.DEFAULT_REPOSITORY
self.has_config = False
def classifiers(self):
''' Fetch the list of classifiers from the server.
'''
response = urllib2.urlopen(self.repository+'?:action=list_classifiers')
log.info(response.read())
def verify_metadata(self):
''' Send the metadata to the package index server to be checked.
'''
# send the info to the server and report the result
(code, result) = self.post_to_server(self.build_post_data('verify'))
log.info('Server response (%s): %s' % (code, result))
def send_metadata(self):
''' Send the metadata to the package index server.
Well, do the following:
1. figure who the user is, and then
2. send the data as a Basic auth'ed POST.
First we try to read the username/password from $HOME/.pypirc,
which is a ConfigParser-formatted file with a section
[distutils] containing username and password entries (both
in clear text). Eg:
[distutils]
index-servers =
pypi
[pypi]
username: fred
password: sekrit
Otherwise, to figure who the user is, we offer the user three
choices:
1. use existing login,
2. register as a new user, or
3. set the password to a random string and email the user.
'''
# see if we can short-cut and get the username/password from the
# config
if self.has_config:
choice = '1'
username = self.username
password = self.password
else:
choice = 'x'
username = password = ''
# get the user's login info
choices = '1 2 3 4'.split()
while choice not in choices:
self.announce('''\
We need to know who you are, so please choose either:
1. use your existing login,
2. register as a new user,
3. have the server generate a new password for you (and email it to you), or
4. quit
Your selection [default 1]: ''', log.INFO)
choice = raw_input()
if not choice:
choice = '1'
elif choice not in choices:
print 'Please choose one of the four options!'
if choice == '1':
# get the username and password
while not username:
username = raw_input('Username: ')
while not password:
password = getpass.getpass('Password: ')
# set up the authentication
auth = urllib2.HTTPPasswordMgr()
host = urlparse.urlparse(self.repository)[1]
auth.add_password(self.realm, host, username, password)
# send the info to the server and report the result
code, result = self.post_to_server(self.build_post_data('submit'),
auth)
self.announce('Server response (%s): %s' % (code, result),
log.INFO)
# possibly save the login
if code == 200:
if self.has_config:
# sharing the password in the distribution instance
# so the upload command can reuse it
self.distribution.password = password
else:
self.announce(('I can store your PyPI login so future '
'submissions will be faster.'), log.INFO)
self.announce('(the login will be stored in %s)' % \
self._get_rc_file(), log.INFO)
choice = 'X'
while choice.lower() not in 'yn':
choice = raw_input('Save your login (y/N)?')
if not choice:
choice = 'n'
if choice.lower() == 'y':
self._store_pypirc(username, password)
elif choice == '2':
data = {':action': 'user'}
data['name'] = data['password'] = data['email'] = ''
data['confirm'] = None
while not data['name']:
data['name'] = raw_input('Username: ')
while data['password'] != data['confirm']:
while not data['password']:
data['password'] = getpass.getpass('Password: ')
while not data['confirm']:
data['confirm'] = getpass.getpass(' Confirm: ')
if data['password'] != data['confirm']:
data['password'] = ''
data['confirm'] = None
print "Password and confirm don't match!"
while not data['email']:
data['email'] = raw_input(' EMail: ')
code, result = self.post_to_server(data)
if code != 200:
log.info('Server response (%s): %s' % (code, result))
else:
log.info('You will receive an email shortly.')
log.info(('Follow the instructions in it to '
'complete registration.'))
elif choice == '3':
data = {':action': 'password_reset'}
data['email'] = ''
while not data['email']:
data['email'] = raw_input('Your email address: ')
code, result = self.post_to_server(data)
log.info('Server response (%s): %s' % (code, result))
def build_post_data(self, action):
# figure the data to send - the metadata plus some additional
# information used by the package server
meta = self.distribution.metadata
data = {
':action': action,
'metadata_version' : '1.0',
'name': meta.get_name(),
'version': meta.get_version(),
'summary': meta.get_description(),
'home_page': meta.get_url(),
'author': meta.get_contact(),
'author_email': meta.get_contact_email(),
'license': meta.get_licence(),
'description': meta.get_long_description(),
'keywords': meta.get_keywords(),
'platform': meta.get_platforms(),
'classifiers': meta.get_classifiers(),
'download_url': meta.get_download_url(),
# PEP 314
'provides': meta.get_provides(),
'requires': meta.get_requires(),
'obsoletes': meta.get_obsoletes(),
}
if data['provides'] or data['requires'] or data['obsoletes']:
data['metadata_version'] = '1.1'
return data
def post_to_server(self, data, auth=None):
''' Post a query to the server, and return a string response.
'''
if 'name' in data:
self.announce('Registering %s to %s' % (data['name'],
self.repository),
log.INFO)
# Build up the MIME payload for the urllib2 POST data
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = '\n--' + boundary
end_boundary = sep_boundary + '--'
body = StringIO.StringIO()
for key, value in data.items():
# handle multiple entries for the same name
            if not isinstance(value, (list, tuple)):
                value = [value]
            for item in value:
                body.write(sep_boundary)
                body.write('\nContent-Disposition: form-data; name="%s"' % key)
                body.write("\n\n")
                body.write(item)
                if item and item[-1] == '\r':
                    body.write('\n') # write an extra newline (lurve Macs)
body.write(end_boundary)
body.write("\n")
body = body.getvalue()
# build the Request
headers = {
'Content-type': 'multipart/form-data; boundary=%s; charset=utf-8'%boundary,
'Content-length': str(len(body))
}
req = urllib2.Request(self.repository, body, headers)
# handle HTTP and include the Basic Auth handler
opener = urllib2.build_opener(
urllib2.HTTPBasicAuthHandler(password_mgr=auth)
)
data = ''
try:
result = opener.open(req)
except urllib2.HTTPError, e:
if self.show_response:
data = e.fp.read()
result = e.code, e.msg
except urllib2.URLError, e:
result = 500, str(e)
else:
if self.show_response:
data = result.read()
result = 200, 'OK'
if self.show_response:
dashes = '-' * 75
self.announce('%s%s%s' % (dashes, data, dashes))
return result
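# --- Illustrative sketch (not part of the original command) -----------------
# post_to_server() above hand-rolls its multipart/form-data body.  This
# hypothetical helper reproduces the same framing for an arbitrary dict so
# the wire format is easy to inspect in isolation.
def _demo_multipart(data,
                    boundary='--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'):
    sep_boundary = '\n--' + boundary
    end_boundary = sep_boundary + '--'
    chunks = []
    for key, value in data.items():
        if not isinstance(value, (list, tuple)):
            value = [value]
        for item in value:
            chunks.append(sep_boundary)
            chunks.append('\nContent-Disposition: form-data; name="%s"' % key)
            chunks.append('\n\n')
            chunks.append(item)
            if item and item[-1] == '\r':
                chunks.append('\n')   # extra newline, as post_to_server() adds
    chunks.append(end_boundary)
    chunks.append('\n')
    return ''.join(chunks)

# e.g. _demo_multipart({':action': 'verify', 'name': 'demo'}) returns exactly
# the body post_to_server() would send for those two fields.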
|
liamgh/liamgreenhughes-sl4a-tf101
|
refs/heads/master
|
python/src/Lib/test/test_mailbox.py
|
53
|
import os
import sys
import time
import stat
import socket
import email
import email.message
import rfc822
import re
import StringIO
from test import test_support
import unittest
import mailbox
import glob
try:
import fcntl
except ImportError:
pass
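# fcntl is POSIX-only; without it (e.g. on Windows) the mailbox module's file
# locking degrades to dot-lock files alone, so a failed import is tolerated.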
class TestBase(unittest.TestCase):
def _check_sample(self, msg):
# Inspect a mailbox.Message representation of the sample message
self.assert_(isinstance(msg, email.message.Message))
self.assert_(isinstance(msg, mailbox.Message))
for key, value in _sample_headers.iteritems():
self.assert_(value in msg.get_all(key))
self.assert_(msg.is_multipart())
self.assert_(len(msg.get_payload()) == len(_sample_payloads))
for i, payload in enumerate(_sample_payloads):
part = msg.get_payload(i)
self.assert_(isinstance(part, email.message.Message))
self.assert_(not isinstance(part, mailbox.Message))
self.assert_(part.get_payload() == payload)
def _delete_recursively(self, target):
# Delete a file or delete a directory recursively
if os.path.isdir(target):
for path, dirs, files in os.walk(target, topdown=False):
for name in files:
os.remove(os.path.join(path, name))
for name in dirs:
os.rmdir(os.path.join(path, name))
os.rmdir(target)
elif os.path.exists(target):
os.remove(target)
class TestMailbox(TestBase):
_factory = None # Overridden by subclasses to reuse tests
_template = 'From: foo\n\n%s'
def setUp(self):
self._path = test_support.TESTFN
self._delete_recursively(self._path)
self._box = self._factory(self._path)
def tearDown(self):
self._box.close()
self._delete_recursively(self._path)
def test_add(self):
# Add copies of a sample message
keys = []
keys.append(self._box.add(self._template % 0))
self.assert_(len(self._box) == 1)
keys.append(self._box.add(mailbox.Message(_sample_message)))
self.assert_(len(self._box) == 2)
keys.append(self._box.add(email.message_from_string(_sample_message)))
self.assert_(len(self._box) == 3)
keys.append(self._box.add(StringIO.StringIO(_sample_message)))
self.assert_(len(self._box) == 4)
keys.append(self._box.add(_sample_message))
self.assert_(len(self._box) == 5)
self.assert_(self._box.get_string(keys[0]) == self._template % 0)
for i in (1, 2, 3, 4):
self._check_sample(self._box[keys[i]])
def test_remove(self):
# Remove messages using remove()
self._test_remove_or_delitem(self._box.remove)
def test_delitem(self):
# Remove messages using __delitem__()
self._test_remove_or_delitem(self._box.__delitem__)
def _test_remove_or_delitem(self, method):
# (Used by test_remove() and test_delitem().)
key0 = self._box.add(self._template % 0)
key1 = self._box.add(self._template % 1)
self.assert_(len(self._box) == 2)
method(key0)
l = len(self._box)
self.assert_(l == 1, "actual l: %s" % l)
self.assertRaises(KeyError, lambda: self._box[key0])
self.assertRaises(KeyError, lambda: method(key0))
self.assert_(self._box.get_string(key1) == self._template % 1)
key2 = self._box.add(self._template % 2)
self.assert_(len(self._box) == 2)
method(key2)
l = len(self._box)
self.assert_(l == 1, "actual l: %s" % l)
self.assertRaises(KeyError, lambda: self._box[key2])
self.assertRaises(KeyError, lambda: method(key2))
self.assert_(self._box.get_string(key1) == self._template % 1)
method(key1)
self.assert_(len(self._box) == 0)
self.assertRaises(KeyError, lambda: self._box[key1])
self.assertRaises(KeyError, lambda: method(key1))
def test_discard(self, repetitions=10):
# Discard messages
key0 = self._box.add(self._template % 0)
key1 = self._box.add(self._template % 1)
self.assert_(len(self._box) == 2)
self._box.discard(key0)
self.assert_(len(self._box) == 1)
self.assertRaises(KeyError, lambda: self._box[key0])
self._box.discard(key0)
self.assert_(len(self._box) == 1)
self.assertRaises(KeyError, lambda: self._box[key0])
def test_get(self):
# Retrieve messages using get()
key0 = self._box.add(self._template % 0)
msg = self._box.get(key0)
self.assert_(msg['from'] == 'foo')
self.assert_(msg.get_payload() == '0')
self.assert_(self._box.get('foo') is None)
self.assert_(self._box.get('foo', False) is False)
self._box.close()
self._box = self._factory(self._path, factory=rfc822.Message)
key1 = self._box.add(self._template % 1)
msg = self._box.get(key1)
self.assert_(msg['from'] == 'foo')
self.assert_(msg.fp.read() == '1')
def test_getitem(self):
# Retrieve message using __getitem__()
key0 = self._box.add(self._template % 0)
msg = self._box[key0]
self.assert_(msg['from'] == 'foo')
self.assert_(msg.get_payload() == '0')
self.assertRaises(KeyError, lambda: self._box['foo'])
self._box.discard(key0)
self.assertRaises(KeyError, lambda: self._box[key0])
def test_get_message(self):
# Get Message representations of messages
key0 = self._box.add(self._template % 0)
key1 = self._box.add(_sample_message)
msg0 = self._box.get_message(key0)
self.assert_(isinstance(msg0, mailbox.Message))
self.assert_(msg0['from'] == 'foo')
self.assert_(msg0.get_payload() == '0')
self._check_sample(self._box.get_message(key1))
def test_get_string(self):
# Get string representations of messages
key0 = self._box.add(self._template % 0)
key1 = self._box.add(_sample_message)
self.assert_(self._box.get_string(key0) == self._template % 0)
self.assert_(self._box.get_string(key1) == _sample_message)
def test_get_file(self):
# Get file representations of messages
key0 = self._box.add(self._template % 0)
key1 = self._box.add(_sample_message)
self.assert_(self._box.get_file(key0).read().replace(os.linesep, '\n')
== self._template % 0)
self.assert_(self._box.get_file(key1).read().replace(os.linesep, '\n')
== _sample_message)
def test_iterkeys(self):
# Get keys using iterkeys()
self._check_iteration(self._box.iterkeys, do_keys=True, do_values=False)
def test_keys(self):
# Get keys using keys()
self._check_iteration(self._box.keys, do_keys=True, do_values=False)
def test_itervalues(self):
# Get values using itervalues()
self._check_iteration(self._box.itervalues, do_keys=False,
do_values=True)
def test_iter(self):
# Get values using __iter__()
self._check_iteration(self._box.__iter__, do_keys=False,
do_values=True)
def test_values(self):
# Get values using values()
self._check_iteration(self._box.values, do_keys=False, do_values=True)
def test_iteritems(self):
# Get keys and values using iteritems()
self._check_iteration(self._box.iteritems, do_keys=True,
do_values=True)
def test_items(self):
# Get keys and values using items()
self._check_iteration(self._box.items, do_keys=True, do_values=True)
def _check_iteration(self, method, do_keys, do_values, repetitions=10):
for value in method():
self.fail("Not empty")
keys, values = [], []
for i in xrange(repetitions):
keys.append(self._box.add(self._template % i))
values.append(self._template % i)
if do_keys and not do_values:
returned_keys = list(method())
elif do_values and not do_keys:
returned_values = list(method())
else:
returned_keys, returned_values = [], []
for key, value in method():
returned_keys.append(key)
returned_values.append(value)
if do_keys:
self.assert_(len(keys) == len(returned_keys))
self.assert_(set(keys) == set(returned_keys))
if do_values:
count = 0
for value in returned_values:
self.assert_(value['from'] == 'foo')
self.assert_(int(value.get_payload()) < repetitions)
count += 1
self.assert_(len(values) == count)
def test_has_key(self):
# Check existence of keys using has_key()
self._test_has_key_or_contains(self._box.has_key)
def test_contains(self):
# Check existence of keys using __contains__()
self._test_has_key_or_contains(self._box.__contains__)
def _test_has_key_or_contains(self, method):
# (Used by test_has_key() and test_contains().)
self.assert_(not method('foo'))
key0 = self._box.add(self._template % 0)
self.assert_(method(key0))
self.assert_(not method('foo'))
key1 = self._box.add(self._template % 1)
self.assert_(method(key1))
self.assert_(method(key0))
self.assert_(not method('foo'))
self._box.remove(key0)
self.assert_(not method(key0))
self.assert_(method(key1))
self.assert_(not method('foo'))
self._box.remove(key1)
self.assert_(not method(key1))
self.assert_(not method(key0))
self.assert_(not method('foo'))
def test_len(self, repetitions=10):
# Get message count
keys = []
for i in xrange(repetitions):
self.assert_(len(self._box) == i)
keys.append(self._box.add(self._template % i))
self.assert_(len(self._box) == i + 1)
for i in xrange(repetitions):
self.assert_(len(self._box) == repetitions - i)
self._box.remove(keys[i])
self.assert_(len(self._box) == repetitions - i - 1)
def test_set_item(self):
# Modify messages using __setitem__()
key0 = self._box.add(self._template % 'original 0')
self.assert_(self._box.get_string(key0) == \
self._template % 'original 0')
key1 = self._box.add(self._template % 'original 1')
self.assert_(self._box.get_string(key1) == \
self._template % 'original 1')
self._box[key0] = self._template % 'changed 0'
self.assert_(self._box.get_string(key0) == \
self._template % 'changed 0')
self._box[key1] = self._template % 'changed 1'
self.assert_(self._box.get_string(key1) == \
self._template % 'changed 1')
self._box[key0] = _sample_message
self._check_sample(self._box[key0])
self._box[key1] = self._box[key0]
self._check_sample(self._box[key1])
self._box[key0] = self._template % 'original 0'
self.assert_(self._box.get_string(key0) ==
self._template % 'original 0')
self._check_sample(self._box[key1])
self.assertRaises(KeyError,
lambda: self._box.__setitem__('foo', 'bar'))
self.assertRaises(KeyError, lambda: self._box['foo'])
self.assert_(len(self._box) == 2)
def test_clear(self, iterations=10):
# Remove all messages using clear()
keys = []
for i in xrange(iterations):
            keys.append(self._box.add(self._template % i))
for i, key in enumerate(keys):
self.assert_(self._box.get_string(key) == self._template % i)
self._box.clear()
self.assert_(len(self._box) == 0)
for i, key in enumerate(keys):
self.assertRaises(KeyError, lambda: self._box.get_string(key))
def test_pop(self):
# Get and remove a message using pop()
key0 = self._box.add(self._template % 0)
self.assert_(key0 in self._box)
key1 = self._box.add(self._template % 1)
self.assert_(key1 in self._box)
self.assert_(self._box.pop(key0).get_payload() == '0')
self.assert_(key0 not in self._box)
self.assert_(key1 in self._box)
key2 = self._box.add(self._template % 2)
self.assert_(key2 in self._box)
self.assert_(self._box.pop(key2).get_payload() == '2')
self.assert_(key2 not in self._box)
self.assert_(key1 in self._box)
self.assert_(self._box.pop(key1).get_payload() == '1')
self.assert_(key1 not in self._box)
self.assert_(len(self._box) == 0)
def test_popitem(self, iterations=10):
# Get and remove an arbitrary (key, message) using popitem()
keys = []
        for i in xrange(iterations):
keys.append(self._box.add(self._template % i))
seen = []
        for i in xrange(iterations):
key, msg = self._box.popitem()
self.assert_(key in keys)
self.assert_(key not in seen)
seen.append(key)
self.assert_(int(msg.get_payload()) == keys.index(key))
self.assert_(len(self._box) == 0)
for key in keys:
self.assertRaises(KeyError, lambda: self._box[key])
def test_update(self):
# Modify multiple messages using update()
key0 = self._box.add(self._template % 'original 0')
key1 = self._box.add(self._template % 'original 1')
key2 = self._box.add(self._template % 'original 2')
self._box.update({key0: self._template % 'changed 0',
key2: _sample_message})
self.assert_(len(self._box) == 3)
self.assert_(self._box.get_string(key0) ==
self._template % 'changed 0')
self.assert_(self._box.get_string(key1) ==
self._template % 'original 1')
self._check_sample(self._box[key2])
self._box.update([(key2, self._template % 'changed 2'),
(key1, self._template % 'changed 1'),
(key0, self._template % 'original 0')])
self.assert_(len(self._box) == 3)
self.assert_(self._box.get_string(key0) ==
self._template % 'original 0')
self.assert_(self._box.get_string(key1) ==
self._template % 'changed 1')
self.assert_(self._box.get_string(key2) ==
self._template % 'changed 2')
self.assertRaises(KeyError,
lambda: self._box.update({'foo': 'bar',
key0: self._template % "changed 0"}))
self.assert_(len(self._box) == 3)
self.assert_(self._box.get_string(key0) ==
self._template % "changed 0")
self.assert_(self._box.get_string(key1) ==
self._template % "changed 1")
self.assert_(self._box.get_string(key2) ==
self._template % "changed 2")
def test_flush(self):
# Write changes to disk
self._test_flush_or_close(self._box.flush, True)
def test_lock_unlock(self):
# Lock and unlock the mailbox
self.assert_(not os.path.exists(self._get_lock_path()))
self._box.lock()
self.assert_(os.path.exists(self._get_lock_path()))
self._box.unlock()
self.assert_(not os.path.exists(self._get_lock_path()))
def test_close(self):
# Close mailbox and flush changes to disk
self._test_flush_or_close(self._box.close, False)
def _test_flush_or_close(self, method, should_call_close):
contents = [self._template % i for i in xrange(3)]
self._box.add(contents[0])
self._box.add(contents[1])
self._box.add(contents[2])
method()
if should_call_close:
self._box.close()
self._box = self._factory(self._path)
keys = self._box.keys()
self.assert_(len(keys) == 3)
for key in keys:
self.assert_(self._box.get_string(key) in contents)
def test_dump_message(self):
# Write message representations to disk
for input in (email.message_from_string(_sample_message),
_sample_message, StringIO.StringIO(_sample_message)):
output = StringIO.StringIO()
self._box._dump_message(input, output)
self.assert_(output.getvalue() ==
_sample_message.replace('\n', os.linesep))
output = StringIO.StringIO()
self.assertRaises(TypeError,
lambda: self._box._dump_message(None, output))
def _get_lock_path(self):
# Return the path of the dot lock file. May be overridden.
return self._path + '.lock'
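# --- Illustrative note (not part of the original suite) ---------------------
# TestMailbox is an abstract reuse point: each backend below supplies only a
# _factory callable and inherits the whole battery of tests.  A hypothetical
# new backend would be wired up the same way, e.g.:
#
#     class TestMyBox(TestMailbox):
#         _factory = lambda self, path, factory=None: MyBox(path, factory)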
class TestMailboxSuperclass(TestBase):
def test_notimplemented(self):
# Test that all Mailbox methods raise NotImplementedException.
box = mailbox.Mailbox('path')
self.assertRaises(NotImplementedError, lambda: box.add(''))
self.assertRaises(NotImplementedError, lambda: box.remove(''))
self.assertRaises(NotImplementedError, lambda: box.__delitem__(''))
self.assertRaises(NotImplementedError, lambda: box.discard(''))
self.assertRaises(NotImplementedError, lambda: box.__setitem__('', ''))
self.assertRaises(NotImplementedError, lambda: box.iterkeys())
self.assertRaises(NotImplementedError, lambda: box.keys())
self.assertRaises(NotImplementedError, lambda: box.itervalues().next())
self.assertRaises(NotImplementedError, lambda: box.__iter__().next())
self.assertRaises(NotImplementedError, lambda: box.values())
self.assertRaises(NotImplementedError, lambda: box.iteritems().next())
self.assertRaises(NotImplementedError, lambda: box.items())
self.assertRaises(NotImplementedError, lambda: box.get(''))
self.assertRaises(NotImplementedError, lambda: box.__getitem__(''))
self.assertRaises(NotImplementedError, lambda: box.get_message(''))
self.assertRaises(NotImplementedError, lambda: box.get_string(''))
self.assertRaises(NotImplementedError, lambda: box.get_file(''))
self.assertRaises(NotImplementedError, lambda: box.has_key(''))
self.assertRaises(NotImplementedError, lambda: box.__contains__(''))
self.assertRaises(NotImplementedError, lambda: box.__len__())
self.assertRaises(NotImplementedError, lambda: box.clear())
self.assertRaises(NotImplementedError, lambda: box.pop(''))
self.assertRaises(NotImplementedError, lambda: box.popitem())
self.assertRaises(NotImplementedError, lambda: box.update((('', ''),)))
self.assertRaises(NotImplementedError, lambda: box.flush())
self.assertRaises(NotImplementedError, lambda: box.lock())
self.assertRaises(NotImplementedError, lambda: box.unlock())
self.assertRaises(NotImplementedError, lambda: box.close())
class TestMaildir(TestMailbox):
_factory = lambda self, path, factory=None: mailbox.Maildir(path, factory)
def setUp(self):
TestMailbox.setUp(self)
if os.name in ('nt', 'os2') or sys.platform == 'cygwin':
self._box.colon = '!'
def test_add_MM(self):
# Add a MaildirMessage instance
msg = mailbox.MaildirMessage(self._template % 0)
msg.set_subdir('cur')
msg.set_info('foo')
key = self._box.add(msg)
self.assert_(os.path.exists(os.path.join(self._path, 'cur', '%s%sfoo' %
(key, self._box.colon))))
def test_get_MM(self):
# Get a MaildirMessage instance
msg = mailbox.MaildirMessage(self._template % 0)
msg.set_subdir('cur')
msg.set_flags('RF')
key = self._box.add(msg)
msg_returned = self._box.get_message(key)
self.assert_(isinstance(msg_returned, mailbox.MaildirMessage))
self.assert_(msg_returned.get_subdir() == 'cur')
self.assert_(msg_returned.get_flags() == 'FR')
def test_set_MM(self):
# Set with a MaildirMessage instance
msg0 = mailbox.MaildirMessage(self._template % 0)
msg0.set_flags('TP')
key = self._box.add(msg0)
msg_returned = self._box.get_message(key)
self.assert_(msg_returned.get_subdir() == 'new')
self.assert_(msg_returned.get_flags() == 'PT')
msg1 = mailbox.MaildirMessage(self._template % 1)
self._box[key] = msg1
msg_returned = self._box.get_message(key)
self.assert_(msg_returned.get_subdir() == 'new')
self.assert_(msg_returned.get_flags() == '')
self.assert_(msg_returned.get_payload() == '1')
msg2 = mailbox.MaildirMessage(self._template % 2)
msg2.set_info('2,S')
self._box[key] = msg2
self._box[key] = self._template % 3
msg_returned = self._box.get_message(key)
self.assert_(msg_returned.get_subdir() == 'new')
self.assert_(msg_returned.get_flags() == 'S')
self.assert_(msg_returned.get_payload() == '3')
def test_consistent_factory(self):
# Add a message.
msg = mailbox.MaildirMessage(self._template % 0)
msg.set_subdir('cur')
msg.set_flags('RF')
key = self._box.add(msg)
        # Create a new mailbox that uses a custom message factory.
class FakeMessage(mailbox.MaildirMessage):
pass
box = mailbox.Maildir(self._path, factory=FakeMessage)
box.colon = self._box.colon
msg2 = box.get_message(key)
self.assert_(isinstance(msg2, FakeMessage))
def test_initialize_new(self):
# Initialize a non-existent mailbox
self.tearDown()
self._box = mailbox.Maildir(self._path)
self._check_basics(factory=rfc822.Message)
self._delete_recursively(self._path)
self._box = self._factory(self._path, factory=None)
self._check_basics()
def test_initialize_existing(self):
# Initialize an existing mailbox
self.tearDown()
for subdir in '', 'tmp', 'new', 'cur':
os.mkdir(os.path.normpath(os.path.join(self._path, subdir)))
self._box = mailbox.Maildir(self._path)
self._check_basics(factory=rfc822.Message)
self._box = mailbox.Maildir(self._path, factory=None)
self._check_basics()
def _check_basics(self, factory=None):
        # (Used by test_initialize_new() and test_initialize_existing().)
self.assertEqual(self._box._path, os.path.abspath(self._path))
self.assertEqual(self._box._factory, factory)
for subdir in '', 'tmp', 'new', 'cur':
path = os.path.join(self._path, subdir)
mode = os.stat(path)[stat.ST_MODE]
self.assert_(stat.S_ISDIR(mode), "Not a directory: '%s'" % path)
def test_list_folders(self):
# List folders
self._box.add_folder('one')
self._box.add_folder('two')
self._box.add_folder('three')
self.assert_(len(self._box.list_folders()) == 3)
self.assert_(set(self._box.list_folders()) ==
set(('one', 'two', 'three')))
def test_get_folder(self):
# Open folders
self._box.add_folder('foo.bar')
folder0 = self._box.get_folder('foo.bar')
folder0.add(self._template % 'bar')
self.assert_(os.path.isdir(os.path.join(self._path, '.foo.bar')))
folder1 = self._box.get_folder('foo.bar')
self.assert_(folder1.get_string(folder1.keys()[0]) == \
self._template % 'bar')
def test_add_and_remove_folders(self):
# Delete folders
self._box.add_folder('one')
self._box.add_folder('two')
self.assert_(len(self._box.list_folders()) == 2)
self.assert_(set(self._box.list_folders()) == set(('one', 'two')))
self._box.remove_folder('one')
self.assert_(len(self._box.list_folders()) == 1)
self.assert_(set(self._box.list_folders()) == set(('two',)))
self._box.add_folder('three')
self.assert_(len(self._box.list_folders()) == 2)
self.assert_(set(self._box.list_folders()) == set(('two', 'three')))
self._box.remove_folder('three')
self.assert_(len(self._box.list_folders()) == 1)
self.assert_(set(self._box.list_folders()) == set(('two',)))
self._box.remove_folder('two')
self.assert_(len(self._box.list_folders()) == 0)
self.assert_(self._box.list_folders() == [])
def test_clean(self):
# Remove old files from 'tmp'
foo_path = os.path.join(self._path, 'tmp', 'foo')
bar_path = os.path.join(self._path, 'tmp', 'bar')
f = open(foo_path, 'w')
f.write("@")
f.close()
f = open(bar_path, 'w')
f.write("@")
f.close()
self._box.clean()
self.assert_(os.path.exists(foo_path))
self.assert_(os.path.exists(bar_path))
foo_stat = os.stat(foo_path)
os.utime(foo_path, (time.time() - 129600 - 2,
foo_stat.st_mtime))
self._box.clean()
self.assert_(not os.path.exists(foo_path))
self.assert_(os.path.exists(bar_path))
def test_create_tmp(self, repetitions=10):
# Create files in tmp directory
hostname = socket.gethostname()
if '/' in hostname:
hostname = hostname.replace('/', r'\057')
if ':' in hostname:
hostname = hostname.replace(':', r'\072')
pid = os.getpid()
pattern = re.compile(r"(?P<time>\d+)\.M(?P<M>\d{1,6})P(?P<P>\d+)"
r"Q(?P<Q>\d+)\.(?P<host>[^:/]+)")
previous_groups = None
for x in xrange(repetitions):
tmp_file = self._box._create_tmp()
head, tail = os.path.split(tmp_file.name)
self.assertEqual(head, os.path.abspath(os.path.join(self._path,
"tmp")),
"File in wrong location: '%s'" % head)
match = pattern.match(tail)
self.assert_(match is not None, "Invalid file name: '%s'" % tail)
groups = match.groups()
if previous_groups is not None:
                self.assert_(int(groups[0]) >= int(previous_groups[0]),
                             "Non-monotonic seconds: '%s' before '%s'" %
                             (previous_groups[0], groups[0]))
                self.assert_(int(groups[1]) >= int(previous_groups[1]) or
                             groups[0] != previous_groups[0],
                             "Non-monotonic milliseconds: '%s' before '%s'" %
                             (previous_groups[1], groups[1]))
self.assert_(int(groups[2]) == pid,
"Process ID mismatch: '%s' should be '%s'" %
(groups[2], pid))
self.assert_(int(groups[3]) == int(previous_groups[3]) + 1,
"Non-sequential counter: '%s' before '%s'" %
(previous_groups[3], groups[3]))
self.assert_(groups[4] == hostname,
"Host name mismatch: '%s' should be '%s'" %
(groups[4], hostname))
previous_groups = groups
tmp_file.write(_sample_message)
tmp_file.seek(0)
self.assert_(tmp_file.read() == _sample_message)
tmp_file.close()
file_count = len(os.listdir(os.path.join(self._path, "tmp")))
self.assert_(file_count == repetitions,
"Wrong file count: '%s' should be '%s'" %
(file_count, repetitions))
def test_refresh(self):
# Update the table of contents
self.assert_(self._box._toc == {})
key0 = self._box.add(self._template % 0)
key1 = self._box.add(self._template % 1)
self.assert_(self._box._toc == {})
self._box._refresh()
self.assert_(self._box._toc == {key0: os.path.join('new', key0),
key1: os.path.join('new', key1)})
key2 = self._box.add(self._template % 2)
self.assert_(self._box._toc == {key0: os.path.join('new', key0),
key1: os.path.join('new', key1)})
self._box._refresh()
self.assert_(self._box._toc == {key0: os.path.join('new', key0),
key1: os.path.join('new', key1),
key2: os.path.join('new', key2)})
def test_lookup(self):
# Look up message subpaths in the TOC
self.assertRaises(KeyError, lambda: self._box._lookup('foo'))
key0 = self._box.add(self._template % 0)
self.assert_(self._box._lookup(key0) == os.path.join('new', key0))
os.remove(os.path.join(self._path, 'new', key0))
self.assert_(self._box._toc == {key0: os.path.join('new', key0)})
self.assertRaises(KeyError, lambda: self._box._lookup(key0))
self.assert_(self._box._toc == {})
def test_lock_unlock(self):
# Lock and unlock the mailbox. For Maildir, this does nothing.
self._box.lock()
self._box.unlock()
def test_folder (self):
# Test for bug #1569790: verify that folders returned by .get_folder()
# use the same factory function.
def dummy_factory (s):
return None
box = self._factory(self._path, factory=dummy_factory)
folder = box.add_folder('folder1')
self.assert_(folder._factory is dummy_factory)
folder1_alias = box.get_folder('folder1')
self.assert_(folder1_alias._factory is dummy_factory)
def test_directory_in_folder (self):
# Test that mailboxes still work if there's a stray extra directory
# in a folder.
for i in range(10):
self._box.add(mailbox.Message(_sample_message))
# Create a stray directory
os.mkdir(os.path.join(self._path, 'cur', 'stray-dir'))
# Check that looping still works with the directory present.
for msg in self._box:
pass
def test_file_permissions(self):
# Verify that message files are created without execute permissions
if not hasattr(os, "stat") or not hasattr(os, "umask"):
return
msg = mailbox.MaildirMessage(self._template % 0)
orig_umask = os.umask(0)
try:
key = self._box.add(msg)
finally:
os.umask(orig_umask)
path = os.path.join(self._path, self._box._lookup(key))
mode = os.stat(path).st_mode
self.assert_(mode & 0111 == 0)
def test_folder_file_perms(self):
# From bug #3228, we want to verify that the file created inside a Maildir
# subfolder isn't marked as executable.
if not hasattr(os, "stat") or not hasattr(os, "umask"):
return
orig_umask = os.umask(0)
try:
subfolder = self._box.add_folder('subfolder')
finally:
os.umask(orig_umask)
path = os.path.join(subfolder._path, 'maildirfolder')
st = os.stat(path)
perms = st.st_mode
self.assertFalse((perms & 0111)) # Execute bits should all be off.
class _TestMboxMMDF(TestMailbox):
def tearDown(self):
self._box.close()
self._delete_recursively(self._path)
for lock_remnant in glob.glob(self._path + '.*'):
test_support.unlink(lock_remnant)
def test_add_from_string(self):
# Add a string starting with 'From ' to the mailbox
key = self._box.add('From foo@bar blah\nFrom: foo\n\n0')
self.assert_(self._box[key].get_from() == 'foo@bar blah')
self.assert_(self._box[key].get_payload() == '0')
def test_add_mbox_or_mmdf_message(self):
# Add an mboxMessage or MMDFMessage
for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
msg = class_('From foo@bar blah\nFrom: foo\n\n0')
key = self._box.add(msg)
def test_open_close_open(self):
# Open and inspect previously-created mailbox
values = [self._template % i for i in xrange(3)]
for value in values:
self._box.add(value)
self._box.close()
mtime = os.path.getmtime(self._path)
self._box = self._factory(self._path)
self.assert_(len(self._box) == 3)
for key in self._box.iterkeys():
self.assert_(self._box.get_string(key) in values)
self._box.close()
self.assert_(mtime == os.path.getmtime(self._path))
def test_add_and_close(self):
# Verifying that closing a mailbox doesn't change added items
self._box.add(_sample_message)
for i in xrange(3):
self._box.add(self._template % i)
self._box.add(_sample_message)
self._box._file.flush()
self._box._file.seek(0)
contents = self._box._file.read()
self._box.close()
self.assert_(contents == open(self._path, 'rb').read())
self._box = self._factory(self._path)
def test_lock_conflict(self):
# Fork off a subprocess that will lock the file for 2 seconds,
# unlock it, and then exit.
if not hasattr(os, 'fork'):
return
pid = os.fork()
if pid == 0:
# In the child, lock the mailbox.
self._box.lock()
time.sleep(2)
self._box.unlock()
os._exit(0)
# In the parent, sleep a bit to give the child time to acquire
# the lock.
time.sleep(0.5)
try:
self.assertRaises(mailbox.ExternalClashError,
self._box.lock)
finally:
# Wait for child to exit. Locking should now succeed.
exited_pid, status = os.waitpid(pid, 0)
self._box.lock()
self._box.unlock()
def test_relock(self):
# Test case for bug #1575506: the mailbox class was locking the
# wrong file object in its flush() method.
msg = "Subject: sub\n\nbody\n"
key1 = self._box.add(msg)
self._box.flush()
self._box.close()
self._box = self._factory(self._path)
self._box.lock()
key2 = self._box.add(msg)
self._box.flush()
self.assert_(self._box._locked)
self._box.close()
class TestMbox(_TestMboxMMDF):
_factory = lambda self, path, factory=None: mailbox.mbox(path, factory)
def test_file_perms(self):
# From bug #3228, we want to verify that the mailbox file isn't executable,
# even if the umask is set to something that would leave executable bits set.
# We only run this test on platforms that support umask.
if hasattr(os, 'umask') and hasattr(os, 'stat'):
try:
old_umask = os.umask(0077)
self._box.close()
os.unlink(self._path)
self._box = mailbox.mbox(self._path, create=True)
self._box.add('')
self._box.close()
finally:
os.umask(old_umask)
st = os.stat(self._path)
perms = st.st_mode
self.assertFalse((perms & 0111)) # Execute bits should all be off.
class TestMMDF(_TestMboxMMDF):
_factory = lambda self, path, factory=None: mailbox.MMDF(path, factory)
class TestMH(TestMailbox):
_factory = lambda self, path, factory=None: mailbox.MH(path, factory)
def test_list_folders(self):
# List folders
self._box.add_folder('one')
self._box.add_folder('two')
self._box.add_folder('three')
self.assert_(len(self._box.list_folders()) == 3)
self.assert_(set(self._box.list_folders()) ==
set(('one', 'two', 'three')))
def test_get_folder(self):
# Open folders
def dummy_factory (s):
return None
self._box = self._factory(self._path, dummy_factory)
new_folder = self._box.add_folder('foo.bar')
folder0 = self._box.get_folder('foo.bar')
folder0.add(self._template % 'bar')
self.assert_(os.path.isdir(os.path.join(self._path, 'foo.bar')))
folder1 = self._box.get_folder('foo.bar')
self.assert_(folder1.get_string(folder1.keys()[0]) == \
self._template % 'bar')
# Test for bug #1569790: verify that folders returned by .get_folder()
# use the same factory function.
self.assert_(new_folder._factory is self._box._factory)
self.assert_(folder0._factory is self._box._factory)
def test_add_and_remove_folders(self):
# Delete folders
self._box.add_folder('one')
self._box.add_folder('two')
self.assert_(len(self._box.list_folders()) == 2)
self.assert_(set(self._box.list_folders()) == set(('one', 'two')))
self._box.remove_folder('one')
self.assert_(len(self._box.list_folders()) == 1)
self.assert_(set(self._box.list_folders()) == set(('two',)))
self._box.add_folder('three')
self.assert_(len(self._box.list_folders()) == 2)
self.assert_(set(self._box.list_folders()) == set(('two', 'three')))
self._box.remove_folder('three')
self.assert_(len(self._box.list_folders()) == 1)
self.assert_(set(self._box.list_folders()) == set(('two',)))
self._box.remove_folder('two')
self.assert_(len(self._box.list_folders()) == 0)
self.assert_(self._box.list_folders() == [])
def test_sequences(self):
# Get and set sequences
self.assert_(self._box.get_sequences() == {})
msg0 = mailbox.MHMessage(self._template % 0)
msg0.add_sequence('foo')
key0 = self._box.add(msg0)
self.assert_(self._box.get_sequences() == {'foo':[key0]})
msg1 = mailbox.MHMessage(self._template % 1)
msg1.set_sequences(['bar', 'replied', 'foo'])
key1 = self._box.add(msg1)
self.assert_(self._box.get_sequences() ==
{'foo':[key0, key1], 'bar':[key1], 'replied':[key1]})
msg0.set_sequences(['flagged'])
self._box[key0] = msg0
self.assert_(self._box.get_sequences() ==
{'foo':[key1], 'bar':[key1], 'replied':[key1],
'flagged':[key0]})
self._box.remove(key1)
self.assert_(self._box.get_sequences() == {'flagged':[key0]})
def test_issue2625(self):
msg0 = mailbox.MHMessage(self._template % 0)
msg0.add_sequence('foo')
key0 = self._box.add(msg0)
refmsg0 = self._box.get_message(key0)
def test_pack(self):
# Pack the contents of the mailbox
msg0 = mailbox.MHMessage(self._template % 0)
msg1 = mailbox.MHMessage(self._template % 1)
msg2 = mailbox.MHMessage(self._template % 2)
msg3 = mailbox.MHMessage(self._template % 3)
msg0.set_sequences(['foo', 'unseen'])
msg1.set_sequences(['foo'])
msg2.set_sequences(['foo', 'flagged'])
msg3.set_sequences(['foo', 'bar', 'replied'])
key0 = self._box.add(msg0)
key1 = self._box.add(msg1)
key2 = self._box.add(msg2)
key3 = self._box.add(msg3)
self.assert_(self._box.get_sequences() ==
{'foo':[key0,key1,key2,key3], 'unseen':[key0],
'flagged':[key2], 'bar':[key3], 'replied':[key3]})
self._box.remove(key2)
self.assert_(self._box.get_sequences() ==
{'foo':[key0,key1,key3], 'unseen':[key0], 'bar':[key3],
'replied':[key3]})
self._box.pack()
self.assert_(self._box.keys() == [1, 2, 3])
        # pack() renumbered the mailbox, so the keys captured above are stale;
        # the renumbered keys are simply 1, 2, 3.
self.assert_(self._box.get_sequences() ==
{'foo':[1, 2, 3], 'unseen':[1], 'bar':[3], 'replied':[3]})
# Test case for packing while holding the mailbox locked.
key0 = self._box.add(msg1)
key1 = self._box.add(msg1)
key2 = self._box.add(msg1)
key3 = self._box.add(msg1)
self._box.remove(key0)
self._box.remove(key2)
self._box.lock()
self._box.pack()
self._box.unlock()
self.assert_(self._box.get_sequences() ==
{'foo':[1, 2, 3, 4, 5],
'unseen':[1], 'bar':[3], 'replied':[3]})
def _get_lock_path(self):
return os.path.join(self._path, '.mh_sequences.lock')
class TestBabyl(TestMailbox):
_factory = lambda self, path, factory=None: mailbox.Babyl(path, factory)
def tearDown(self):
self._box.close()
self._delete_recursively(self._path)
for lock_remnant in glob.glob(self._path + '.*'):
test_support.unlink(lock_remnant)
def test_labels(self):
# Get labels from the mailbox
self.assert_(self._box.get_labels() == [])
msg0 = mailbox.BabylMessage(self._template % 0)
msg0.add_label('foo')
key0 = self._box.add(msg0)
self.assert_(self._box.get_labels() == ['foo'])
msg1 = mailbox.BabylMessage(self._template % 1)
msg1.set_labels(['bar', 'answered', 'foo'])
key1 = self._box.add(msg1)
self.assert_(set(self._box.get_labels()) == set(['foo', 'bar']))
msg0.set_labels(['blah', 'filed'])
self._box[key0] = msg0
self.assert_(set(self._box.get_labels()) ==
set(['foo', 'bar', 'blah']))
self._box.remove(key1)
self.assert_(set(self._box.get_labels()) == set(['blah']))
class TestMessage(TestBase):
_factory = mailbox.Message # Overridden by subclasses to reuse tests
def setUp(self):
self._path = test_support.TESTFN
def tearDown(self):
self._delete_recursively(self._path)
def test_initialize_with_eMM(self):
# Initialize based on email.message.Message instance
eMM = email.message_from_string(_sample_message)
msg = self._factory(eMM)
self._post_initialize_hook(msg)
self._check_sample(msg)
def test_initialize_with_string(self):
# Initialize based on string
msg = self._factory(_sample_message)
self._post_initialize_hook(msg)
self._check_sample(msg)
def test_initialize_with_file(self):
# Initialize based on contents of file
f = open(self._path, 'w+')
f.write(_sample_message)
f.seek(0)
msg = self._factory(f)
self._post_initialize_hook(msg)
self._check_sample(msg)
f.close()
def test_initialize_with_nothing(self):
# Initialize without arguments
msg = self._factory()
self._post_initialize_hook(msg)
self.assert_(isinstance(msg, email.message.Message))
self.assert_(isinstance(msg, mailbox.Message))
self.assert_(isinstance(msg, self._factory))
self.assert_(msg.keys() == [])
self.assert_(not msg.is_multipart())
        self.assert_(msg.get_payload() is None)
def test_initialize_incorrectly(self):
# Initialize with invalid argument
self.assertRaises(TypeError, lambda: self._factory(object()))
def test_become_message(self):
# Take on the state of another message
eMM = email.message_from_string(_sample_message)
msg = self._factory()
msg._become_message(eMM)
self._check_sample(msg)
def test_explain_to(self):
# Copy self's format-specific data to other message formats.
# This test is superficial; better ones are in TestMessageConversion.
msg = self._factory()
for class_ in (mailbox.Message, mailbox.MaildirMessage,
mailbox.mboxMessage, mailbox.MHMessage,
mailbox.BabylMessage, mailbox.MMDFMessage):
other_msg = class_()
msg._explain_to(other_msg)
other_msg = email.message.Message()
self.assertRaises(TypeError, lambda: msg._explain_to(other_msg))
def _post_initialize_hook(self, msg):
# Overridden by subclasses to check extra things after initialization
pass
class TestMaildirMessage(TestMessage):
_factory = mailbox.MaildirMessage
def _post_initialize_hook(self, msg):
self.assert_(msg._subdir == 'new')
self.assert_(msg._info == '')
def test_subdir(self):
# Use get_subdir() and set_subdir()
msg = mailbox.MaildirMessage(_sample_message)
self.assert_(msg.get_subdir() == 'new')
msg.set_subdir('cur')
self.assert_(msg.get_subdir() == 'cur')
msg.set_subdir('new')
self.assert_(msg.get_subdir() == 'new')
self.assertRaises(ValueError, lambda: msg.set_subdir('tmp'))
self.assert_(msg.get_subdir() == 'new')
msg.set_subdir('new')
self.assert_(msg.get_subdir() == 'new')
self._check_sample(msg)
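    # For reference: the flag letters exercised below are the standard maildir
    # info-2 set -- D(raft), F(lagged), P(assed), R(eplied), S(een), T(rashed).
    # get_flags() reports them in sorted order, hence 'SDTP' -> 'DPST'.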
def test_flags(self):
# Use get_flags(), set_flags(), add_flag(), remove_flag()
msg = mailbox.MaildirMessage(_sample_message)
self.assert_(msg.get_flags() == '')
self.assert_(msg.get_subdir() == 'new')
msg.set_flags('F')
self.assert_(msg.get_subdir() == 'new')
self.assert_(msg.get_flags() == 'F')
msg.set_flags('SDTP')
self.assert_(msg.get_flags() == 'DPST')
msg.add_flag('FT')
self.assert_(msg.get_flags() == 'DFPST')
msg.remove_flag('TDRP')
self.assert_(msg.get_flags() == 'FS')
self.assert_(msg.get_subdir() == 'new')
self._check_sample(msg)
def test_date(self):
# Use get_date() and set_date()
msg = mailbox.MaildirMessage(_sample_message)
self.assert_(abs(msg.get_date() - time.time()) < 60)
msg.set_date(0.0)
self.assert_(msg.get_date() == 0.0)
def test_info(self):
# Use get_info() and set_info()
msg = mailbox.MaildirMessage(_sample_message)
self.assert_(msg.get_info() == '')
msg.set_info('1,foo=bar')
self.assert_(msg.get_info() == '1,foo=bar')
self.assertRaises(TypeError, lambda: msg.set_info(None))
self._check_sample(msg)
def test_info_and_flags(self):
# Test interaction of info and flag methods
msg = mailbox.MaildirMessage(_sample_message)
self.assert_(msg.get_info() == '')
msg.set_flags('SF')
self.assert_(msg.get_flags() == 'FS')
self.assert_(msg.get_info() == '2,FS')
msg.set_info('1,')
self.assert_(msg.get_flags() == '')
self.assert_(msg.get_info() == '1,')
msg.remove_flag('RPT')
self.assert_(msg.get_flags() == '')
self.assert_(msg.get_info() == '1,')
msg.add_flag('D')
self.assert_(msg.get_flags() == 'D')
self.assert_(msg.get_info() == '2,D')
self._check_sample(msg)
class _TestMboxMMDFMessage(TestMessage):
_factory = mailbox._mboxMMDFMessage
def _post_initialize_hook(self, msg):
self._check_from(msg)
def test_initialize_with_unixfrom(self):
# Initialize with a message that already has a _unixfrom attribute
msg = mailbox.Message(_sample_message)
msg.set_unixfrom('From foo@bar blah')
msg = mailbox.mboxMessage(msg)
self.assert_(msg.get_from() == 'foo@bar blah', msg.get_from())
def test_from(self):
# Get and set "From " line
msg = mailbox.mboxMessage(_sample_message)
self._check_from(msg)
msg.set_from('foo bar')
self.assert_(msg.get_from() == 'foo bar')
msg.set_from('foo@bar', True)
self._check_from(msg, 'foo@bar')
msg.set_from('blah@temp', time.localtime())
self._check_from(msg, 'blah@temp')
def test_flags(self):
# Use get_flags(), set_flags(), add_flag(), remove_flag()
msg = mailbox.mboxMessage(_sample_message)
self.assert_(msg.get_flags() == '')
msg.set_flags('F')
self.assert_(msg.get_flags() == 'F')
msg.set_flags('XODR')
self.assert_(msg.get_flags() == 'RODX')
msg.add_flag('FA')
self.assert_(msg.get_flags() == 'RODFAX')
msg.remove_flag('FDXA')
self.assert_(msg.get_flags() == 'RO')
self._check_sample(msg)
def _check_from(self, msg, sender=None):
# Check contents of "From " line
if sender is None:
sender = "MAILER-DAEMON"
self.assert_(re.match(sender + r" \w{3} \w{3} [\d ]\d [\d ]\d:\d{2}:"
r"\d{2} \d{4}", msg.get_from()) is not None)
class TestMboxMessage(_TestMboxMMDFMessage):
_factory = mailbox.mboxMessage
class TestMHMessage(TestMessage):
_factory = mailbox.MHMessage
def _post_initialize_hook(self, msg):
self.assert_(msg._sequences == [])
def test_sequences(self):
# Get, set, join, and leave sequences
msg = mailbox.MHMessage(_sample_message)
self.assert_(msg.get_sequences() == [])
msg.set_sequences(['foobar'])
self.assert_(msg.get_sequences() == ['foobar'])
msg.set_sequences([])
self.assert_(msg.get_sequences() == [])
msg.add_sequence('unseen')
self.assert_(msg.get_sequences() == ['unseen'])
msg.add_sequence('flagged')
self.assert_(msg.get_sequences() == ['unseen', 'flagged'])
msg.add_sequence('flagged')
self.assert_(msg.get_sequences() == ['unseen', 'flagged'])
msg.remove_sequence('unseen')
self.assert_(msg.get_sequences() == ['flagged'])
msg.add_sequence('foobar')
self.assert_(msg.get_sequences() == ['flagged', 'foobar'])
msg.remove_sequence('replied')
self.assert_(msg.get_sequences() == ['flagged', 'foobar'])
msg.set_sequences(['foobar', 'replied'])
self.assert_(msg.get_sequences() == ['foobar', 'replied'])
class TestBabylMessage(TestMessage):
_factory = mailbox.BabylMessage
def _post_initialize_hook(self, msg):
self.assert_(msg._labels == [])
def test_labels(self):
# Get, set, join, and leave labels
msg = mailbox.BabylMessage(_sample_message)
self.assert_(msg.get_labels() == [])
msg.set_labels(['foobar'])
self.assert_(msg.get_labels() == ['foobar'])
msg.set_labels([])
self.assert_(msg.get_labels() == [])
msg.add_label('filed')
self.assert_(msg.get_labels() == ['filed'])
msg.add_label('resent')
self.assert_(msg.get_labels() == ['filed', 'resent'])
msg.add_label('resent')
self.assert_(msg.get_labels() == ['filed', 'resent'])
msg.remove_label('filed')
self.assert_(msg.get_labels() == ['resent'])
msg.add_label('foobar')
self.assert_(msg.get_labels() == ['resent', 'foobar'])
msg.remove_label('unseen')
self.assert_(msg.get_labels() == ['resent', 'foobar'])
msg.set_labels(['foobar', 'answered'])
self.assert_(msg.get_labels() == ['foobar', 'answered'])
def test_visible(self):
# Get, set, and update visible headers
msg = mailbox.BabylMessage(_sample_message)
visible = msg.get_visible()
self.assert_(visible.keys() == [])
self.assert_(visible.get_payload() is None)
visible['User-Agent'] = 'FooBar 1.0'
visible['X-Whatever'] = 'Blah'
self.assert_(msg.get_visible().keys() == [])
msg.set_visible(visible)
visible = msg.get_visible()
self.assert_(visible.keys() == ['User-Agent', 'X-Whatever'])
self.assert_(visible['User-Agent'] == 'FooBar 1.0')
self.assert_(visible['X-Whatever'] == 'Blah')
self.assert_(visible.get_payload() is None)
msg.update_visible()
self.assert_(visible.keys() == ['User-Agent', 'X-Whatever'])
self.assert_(visible.get_payload() is None)
visible = msg.get_visible()
self.assert_(visible.keys() == ['User-Agent', 'Date', 'From', 'To',
'Subject'])
for header in ('User-Agent', 'Date', 'From', 'To', 'Subject'):
self.assert_(visible[header] == msg[header])
class TestMMDFMessage(_TestMboxMMDFMessage):
_factory = mailbox.MMDFMessage
class TestMessageConversion(TestBase):
def test_plain_to_x(self):
# Convert Message to all formats
for class_ in (mailbox.Message, mailbox.MaildirMessage,
mailbox.mboxMessage, mailbox.MHMessage,
mailbox.BabylMessage, mailbox.MMDFMessage):
msg_plain = mailbox.Message(_sample_message)
msg = class_(msg_plain)
self._check_sample(msg)
def test_x_to_plain(self):
# Convert all formats to Message
for class_ in (mailbox.Message, mailbox.MaildirMessage,
mailbox.mboxMessage, mailbox.MHMessage,
mailbox.BabylMessage, mailbox.MMDFMessage):
msg = class_(_sample_message)
msg_plain = mailbox.Message(msg)
self._check_sample(msg_plain)
def test_x_to_invalid(self):
# Convert all formats to an invalid format
for class_ in (mailbox.Message, mailbox.MaildirMessage,
mailbox.mboxMessage, mailbox.MHMessage,
mailbox.BabylMessage, mailbox.MMDFMessage):
self.assertRaises(TypeError, lambda: class_(False))
def test_maildir_to_maildir(self):
# Convert MaildirMessage to MaildirMessage
msg_maildir = mailbox.MaildirMessage(_sample_message)
msg_maildir.set_flags('DFPRST')
msg_maildir.set_subdir('cur')
date = msg_maildir.get_date()
msg = mailbox.MaildirMessage(msg_maildir)
self._check_sample(msg)
self.assert_(msg.get_flags() == 'DFPRST')
self.assert_(msg.get_subdir() == 'cur')
self.assert_(msg.get_date() == date)
def test_maildir_to_mboxmmdf(self):
# Convert MaildirMessage to mboxmessage and MMDFMessage
pairs = (('D', ''), ('F', 'F'), ('P', ''), ('R', 'A'), ('S', 'R'),
('T', 'D'), ('DFPRST', 'RDFA'))
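        # i.e. the conventional maildir -> mbox/MMDF flag mapping: S(een)->R(ead),
        # R(eplied)->A(nswered), T(rashed)->D(eleted), F(lagged)->F; D(raft) and
        # P(assed) have no counterpart, and leaving 'new' adds O(ld) (see below).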
for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
msg_maildir = mailbox.MaildirMessage(_sample_message)
msg_maildir.set_date(0.0)
for setting, result in pairs:
msg_maildir.set_flags(setting)
msg = class_(msg_maildir)
self.assert_(msg.get_flags() == result)
self.assert_(msg.get_from() == 'MAILER-DAEMON %s' %
time.asctime(time.gmtime(0.0)))
msg_maildir.set_subdir('cur')
self.assert_(class_(msg_maildir).get_flags() == 'RODFA')
def test_maildir_to_mh(self):
# Convert MaildirMessage to MHMessage
msg_maildir = mailbox.MaildirMessage(_sample_message)
pairs = (('D', ['unseen']), ('F', ['unseen', 'flagged']),
('P', ['unseen']), ('R', ['unseen', 'replied']), ('S', []),
('T', ['unseen']), ('DFPRST', ['replied', 'flagged']))
for setting, result in pairs:
msg_maildir.set_flags(setting)
self.assert_(mailbox.MHMessage(msg_maildir).get_sequences() == \
result)
def test_maildir_to_babyl(self):
# Convert MaildirMessage to Babyl
msg_maildir = mailbox.MaildirMessage(_sample_message)
pairs = (('D', ['unseen']), ('F', ['unseen']),
('P', ['unseen', 'forwarded']), ('R', ['unseen', 'answered']),
('S', []), ('T', ['unseen', 'deleted']),
('DFPRST', ['deleted', 'answered', 'forwarded']))
for setting, result in pairs:
msg_maildir.set_flags(setting)
self.assert_(mailbox.BabylMessage(msg_maildir).get_labels() == \
result)
def test_mboxmmdf_to_maildir(self):
# Convert mboxMessage and MMDFMessage to MaildirMessage
for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
msg_mboxMMDF = class_(_sample_message)
msg_mboxMMDF.set_from('foo@bar', time.gmtime(0.0))
pairs = (('R', 'S'), ('O', ''), ('D', 'T'), ('F', 'F'), ('A', 'R'),
('RODFA', 'FRST'))
for setting, result in pairs:
msg_mboxMMDF.set_flags(setting)
msg = mailbox.MaildirMessage(msg_mboxMMDF)
self.assert_(msg.get_flags() == result)
self.assert_(msg.get_date() == 0.0, msg.get_date())
msg_mboxMMDF.set_flags('O')
self.assert_(mailbox.MaildirMessage(msg_mboxMMDF).get_subdir() == \
'cur')
def test_mboxmmdf_to_mboxmmdf(self):
# Convert mboxMessage and MMDFMessage to mboxMessage and MMDFMessage
for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
msg_mboxMMDF = class_(_sample_message)
msg_mboxMMDF.set_flags('RODFA')
msg_mboxMMDF.set_from('foo@bar')
for class2_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
msg2 = class2_(msg_mboxMMDF)
self.assert_(msg2.get_flags() == 'RODFA')
self.assert_(msg2.get_from() == 'foo@bar')
def test_mboxmmdf_to_mh(self):
# Convert mboxMessage and MMDFMessage to MHMessage
for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
msg_mboxMMDF = class_(_sample_message)
pairs = (('R', []), ('O', ['unseen']), ('D', ['unseen']),
('F', ['unseen', 'flagged']),
('A', ['unseen', 'replied']),
('RODFA', ['replied', 'flagged']))
for setting, result in pairs:
msg_mboxMMDF.set_flags(setting)
self.assert_(mailbox.MHMessage(msg_mboxMMDF).get_sequences() \
== result)
def test_mboxmmdf_to_babyl(self):
# Convert mboxMessage and MMDFMessage to BabylMessage
for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
msg = class_(_sample_message)
pairs = (('R', []), ('O', ['unseen']),
('D', ['unseen', 'deleted']), ('F', ['unseen']),
('A', ['unseen', 'answered']),
('RODFA', ['deleted', 'answered']))
for setting, result in pairs:
msg.set_flags(setting)
self.assert_(mailbox.BabylMessage(msg).get_labels() == result)
def test_mh_to_maildir(self):
# Convert MHMessage to MaildirMessage
pairs = (('unseen', ''), ('replied', 'RS'), ('flagged', 'FS'))
for setting, result in pairs:
msg = mailbox.MHMessage(_sample_message)
msg.add_sequence(setting)
self.assert_(mailbox.MaildirMessage(msg).get_flags() == result)
self.assert_(mailbox.MaildirMessage(msg).get_subdir() == 'cur')
msg = mailbox.MHMessage(_sample_message)
msg.add_sequence('unseen')
msg.add_sequence('replied')
msg.add_sequence('flagged')
self.assert_(mailbox.MaildirMessage(msg).get_flags() == 'FR')
self.assert_(mailbox.MaildirMessage(msg).get_subdir() == 'cur')
def test_mh_to_mboxmmdf(self):
# Convert MHMessage to mboxMessage and MMDFMessage
pairs = (('unseen', 'O'), ('replied', 'ROA'), ('flagged', 'ROF'))
for setting, result in pairs:
msg = mailbox.MHMessage(_sample_message)
msg.add_sequence(setting)
for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
self.assert_(class_(msg).get_flags() == result)
msg = mailbox.MHMessage(_sample_message)
msg.add_sequence('unseen')
msg.add_sequence('replied')
msg.add_sequence('flagged')
for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
self.assert_(class_(msg).get_flags() == 'OFA')
def test_mh_to_mh(self):
# Convert MHMessage to MHMessage
msg = mailbox.MHMessage(_sample_message)
msg.add_sequence('unseen')
msg.add_sequence('replied')
msg.add_sequence('flagged')
self.assert_(mailbox.MHMessage(msg).get_sequences() == \
['unseen', 'replied', 'flagged'])
def test_mh_to_babyl(self):
# Convert MHMessage to BabylMessage
pairs = (('unseen', ['unseen']), ('replied', ['answered']),
('flagged', []))
for setting, result in pairs:
msg = mailbox.MHMessage(_sample_message)
msg.add_sequence(setting)
self.assert_(mailbox.BabylMessage(msg).get_labels() == result)
msg = mailbox.MHMessage(_sample_message)
msg.add_sequence('unseen')
msg.add_sequence('replied')
msg.add_sequence('flagged')
self.assert_(mailbox.BabylMessage(msg).get_labels() == \
['unseen', 'answered'])
def test_babyl_to_maildir(self):
# Convert BabylMessage to MaildirMessage
pairs = (('unseen', ''), ('deleted', 'ST'), ('filed', 'S'),
('answered', 'RS'), ('forwarded', 'PS'), ('edited', 'S'),
('resent', 'PS'))
for setting, result in pairs:
msg = mailbox.BabylMessage(_sample_message)
msg.add_label(setting)
self.assert_(mailbox.MaildirMessage(msg).get_flags() == result)
self.assert_(mailbox.MaildirMessage(msg).get_subdir() == 'cur')
msg = mailbox.BabylMessage(_sample_message)
for label in ('unseen', 'deleted', 'filed', 'answered', 'forwarded',
'edited', 'resent'):
msg.add_label(label)
self.assert_(mailbox.MaildirMessage(msg).get_flags() == 'PRT')
self.assert_(mailbox.MaildirMessage(msg).get_subdir() == 'cur')
def test_babyl_to_mboxmmdf(self):
# Convert BabylMessage to mboxMessage and MMDFMessage
pairs = (('unseen', 'O'), ('deleted', 'ROD'), ('filed', 'RO'),
('answered', 'ROA'), ('forwarded', 'RO'), ('edited', 'RO'),
('resent', 'RO'))
for setting, result in pairs:
for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
msg = mailbox.BabylMessage(_sample_message)
msg.add_label(setting)
self.assert_(class_(msg).get_flags() == result)
msg = mailbox.BabylMessage(_sample_message)
for label in ('unseen', 'deleted', 'filed', 'answered', 'forwarded',
'edited', 'resent'):
msg.add_label(label)
for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
self.assert_(class_(msg).get_flags() == 'ODA')
def test_babyl_to_mh(self):
# Convert BabylMessage to MHMessage
pairs = (('unseen', ['unseen']), ('deleted', []), ('filed', []),
('answered', ['replied']), ('forwarded', []), ('edited', []),
('resent', []))
for setting, result in pairs:
msg = mailbox.BabylMessage(_sample_message)
msg.add_label(setting)
self.assert_(mailbox.MHMessage(msg).get_sequences() == result)
msg = mailbox.BabylMessage(_sample_message)
for label in ('unseen', 'deleted', 'filed', 'answered', 'forwarded',
'edited', 'resent'):
msg.add_label(label)
self.assert_(mailbox.MHMessage(msg).get_sequences() == \
['unseen', 'replied'])
def test_babyl_to_babyl(self):
# Convert BabylMessage to BabylMessage
msg = mailbox.BabylMessage(_sample_message)
msg.update_visible()
for label in ('unseen', 'deleted', 'filed', 'answered', 'forwarded',
'edited', 'resent'):
msg.add_label(label)
msg2 = mailbox.BabylMessage(msg)
self.assert_(msg2.get_labels() == ['unseen', 'deleted', 'filed',
'answered', 'forwarded', 'edited',
'resent'])
self.assert_(msg.get_visible().keys() == msg2.get_visible().keys())
for key in msg.get_visible().keys():
self.assert_(msg.get_visible()[key] == msg2.get_visible()[key])
class TestProxyFileBase(TestBase):
def _test_read(self, proxy):
# Read by byte
proxy.seek(0)
self.assert_(proxy.read() == 'bar')
proxy.seek(1)
self.assert_(proxy.read() == 'ar')
proxy.seek(0)
self.assert_(proxy.read(2) == 'ba')
proxy.seek(1)
self.assert_(proxy.read(-1) == 'ar')
proxy.seek(2)
self.assert_(proxy.read(1000) == 'r')
def _test_readline(self, proxy):
# Read by line
proxy.seek(0)
self.assert_(proxy.readline() == 'foo' + os.linesep)
self.assert_(proxy.readline() == 'bar' + os.linesep)
self.assert_(proxy.readline() == 'fred' + os.linesep)
self.assert_(proxy.readline() == 'bob')
proxy.seek(2)
self.assert_(proxy.readline() == 'o' + os.linesep)
proxy.seek(6 + 2 * len(os.linesep))
self.assert_(proxy.readline() == 'fred' + os.linesep)
proxy.seek(6 + 2 * len(os.linesep))
self.assert_(proxy.readline(2) == 'fr')
self.assert_(proxy.readline(-10) == 'ed' + os.linesep)
def _test_readlines(self, proxy):
# Read multiple lines
proxy.seek(0)
self.assert_(proxy.readlines() == ['foo' + os.linesep,
'bar' + os.linesep,
'fred' + os.linesep, 'bob'])
proxy.seek(0)
self.assert_(proxy.readlines(2) == ['foo' + os.linesep])
proxy.seek(3 + len(os.linesep))
self.assert_(proxy.readlines(4 + len(os.linesep)) ==
['bar' + os.linesep, 'fred' + os.linesep])
proxy.seek(3)
self.assert_(proxy.readlines(1000) == [os.linesep, 'bar' + os.linesep,
'fred' + os.linesep, 'bob'])
def _test_iteration(self, proxy):
# Iterate by line
proxy.seek(0)
iterator = iter(proxy)
self.assert_(iterator.next() == 'foo' + os.linesep)
self.assert_(iterator.next() == 'bar' + os.linesep)
self.assert_(iterator.next() == 'fred' + os.linesep)
self.assert_(iterator.next() == 'bob')
self.assertRaises(StopIteration, lambda: iterator.next())
def _test_seek_and_tell(self, proxy):
# Seek and use tell to check position
proxy.seek(3)
self.assert_(proxy.tell() == 3)
self.assert_(proxy.read(len(os.linesep)) == os.linesep)
proxy.seek(2, 1)
self.assert_(proxy.read(1 + len(os.linesep)) == 'r' + os.linesep)
proxy.seek(-3 - len(os.linesep), 2)
self.assert_(proxy.read(3) == 'bar')
proxy.seek(2, 0)
self.assert_(proxy.read() == 'o' + os.linesep + 'bar' + os.linesep)
proxy.seek(100)
self.assert_(proxy.read() == '')
def _test_close(self, proxy):
# Close a file
proxy.close()
self.assertRaises(AttributeError, lambda: proxy.close())
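# Orientation for the tests below (informal sketch): mailbox._ProxyFile wraps
# an already-open file with an independent read position, while
# mailbox._PartialFile additionally restricts access to a byte range. For
# example, _PartialFile(f, 3, 6) exposes only bytes 3..5 of f, re-based so
# that tell() returns 0 at the start of the range.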
class TestProxyFile(TestProxyFileBase):
def setUp(self):
self._path = test_support.TESTFN
self._file = open(self._path, 'wb+')
def tearDown(self):
self._file.close()
self._delete_recursively(self._path)
def test_initialize(self):
# Initialize and check position
self._file.write('foo')
pos = self._file.tell()
proxy0 = mailbox._ProxyFile(self._file)
self.assert_(proxy0.tell() == pos)
self.assert_(self._file.tell() == pos)
proxy1 = mailbox._ProxyFile(self._file, 0)
self.assert_(proxy1.tell() == 0)
self.assert_(self._file.tell() == pos)
def test_read(self):
self._file.write('bar')
self._test_read(mailbox._ProxyFile(self._file))
def test_readline(self):
self._file.write('foo%sbar%sfred%sbob' % (os.linesep, os.linesep,
os.linesep))
self._test_readline(mailbox._ProxyFile(self._file))
def test_readlines(self):
self._file.write('foo%sbar%sfred%sbob' % (os.linesep, os.linesep,
os.linesep))
self._test_readlines(mailbox._ProxyFile(self._file))
def test_iteration(self):
self._file.write('foo%sbar%sfred%sbob' % (os.linesep, os.linesep,
os.linesep))
self._test_iteration(mailbox._ProxyFile(self._file))
def test_seek_and_tell(self):
self._file.write('foo%sbar%s' % (os.linesep, os.linesep))
self._test_seek_and_tell(mailbox._ProxyFile(self._file))
def test_close(self):
self._file.write('foo%sbar%s' % (os.linesep, os.linesep))
self._test_close(mailbox._ProxyFile(self._file))
class TestPartialFile(TestProxyFileBase):
def setUp(self):
self._path = test_support.TESTFN
self._file = open(self._path, 'wb+')
def tearDown(self):
self._file.close()
self._delete_recursively(self._path)
def test_initialize(self):
# Initialize and check position
self._file.write('foo' + os.linesep + 'bar')
pos = self._file.tell()
proxy = mailbox._PartialFile(self._file, 2, 5)
self.assert_(proxy.tell() == 0)
self.assert_(self._file.tell() == pos)
def test_read(self):
self._file.write('***bar***')
self._test_read(mailbox._PartialFile(self._file, 3, 6))
def test_readline(self):
self._file.write('!!!!!foo%sbar%sfred%sbob!!!!!' %
(os.linesep, os.linesep, os.linesep))
self._test_readline(mailbox._PartialFile(self._file, 5,
18 + 3 * len(os.linesep)))
def test_readlines(self):
self._file.write('foo%sbar%sfred%sbob?????' %
(os.linesep, os.linesep, os.linesep))
self._test_readlines(mailbox._PartialFile(self._file, 0,
13 + 3 * len(os.linesep)))
def test_iteration(self):
self._file.write('____foo%sbar%sfred%sbob####' %
(os.linesep, os.linesep, os.linesep))
self._test_iteration(mailbox._PartialFile(self._file, 4,
17 + 3 * len(os.linesep)))
def test_seek_and_tell(self):
self._file.write('(((foo%sbar%s$$$' % (os.linesep, os.linesep))
self._test_seek_and_tell(mailbox._PartialFile(self._file, 3,
9 + 2 * len(os.linesep)))
def test_close(self):
self._file.write('&foo%sbar%s^' % (os.linesep, os.linesep))
self._test_close(mailbox._PartialFile(self._file, 1,
6 + 3 * len(os.linesep)))
## Start: tests from the original module (for backward compatibility).
FROM_ = "From some.body@dummy.domain Sat Jul 24 13:43:35 2004\n"
DUMMY_MESSAGE = """\
From: some.body@dummy.domain
To: me@my.domain
Subject: Simple Test

This is a dummy message.
"""
class MaildirTestCase(unittest.TestCase):
def setUp(self):
# create a new maildir mailbox to work with:
self._dir = test_support.TESTFN
os.mkdir(self._dir)
os.mkdir(os.path.join(self._dir, "cur"))
os.mkdir(os.path.join(self._dir, "tmp"))
os.mkdir(os.path.join(self._dir, "new"))
self._counter = 1
self._msgfiles = []
def tearDown(self):
map(os.unlink, self._msgfiles)
os.rmdir(os.path.join(self._dir, "cur"))
os.rmdir(os.path.join(self._dir, "tmp"))
os.rmdir(os.path.join(self._dir, "new"))
os.rmdir(self._dir)
def createMessage(self, dir, mbox=False):
t = int(time.time() % 1000000)
pid = self._counter
self._counter += 1
filename = os.extsep.join((str(t), str(pid), "myhostname", "mydomain"))
tmpname = os.path.join(self._dir, "tmp", filename)
newname = os.path.join(self._dir, dir, filename)
fp = open(tmpname, "w")
self._msgfiles.append(tmpname)
if mbox:
fp.write(FROM_)
fp.write(DUMMY_MESSAGE)
fp.close()
if hasattr(os, "link"):
os.link(tmpname, newname)
else:
fp = open(newname, "w")
fp.write(DUMMY_MESSAGE)
fp.close()
self._msgfiles.append(newname)
return tmpname
def test_empty_maildir(self):
"""Test an empty maildir mailbox"""
# Test for regression on bug #117490:
# Make sure the boxes attribute actually gets set.
self.mbox = mailbox.Maildir(test_support.TESTFN)
#self.assert_(hasattr(self.mbox, "boxes"))
#self.assert_(len(self.mbox.boxes) == 0)
self.assert_(self.mbox.next() is None)
self.assert_(self.mbox.next() is None)
def test_nonempty_maildir_cur(self):
self.createMessage("cur")
self.mbox = mailbox.Maildir(test_support.TESTFN)
#self.assert_(len(self.mbox.boxes) == 1)
self.assert_(self.mbox.next() is not None)
self.assert_(self.mbox.next() is None)
self.assert_(self.mbox.next() is None)
def test_nonempty_maildir_new(self):
self.createMessage("new")
self.mbox = mailbox.Maildir(test_support.TESTFN)
#self.assert_(len(self.mbox.boxes) == 1)
self.assert_(self.mbox.next() is not None)
self.assert_(self.mbox.next() is None)
self.assert_(self.mbox.next() is None)
def test_nonempty_maildir_both(self):
self.createMessage("cur")
self.createMessage("new")
self.mbox = mailbox.Maildir(test_support.TESTFN)
#self.assert_(len(self.mbox.boxes) == 2)
self.assert_(self.mbox.next() is not None)
self.assert_(self.mbox.next() is not None)
self.assert_(self.mbox.next() is None)
self.assert_(self.mbox.next() is None)
def test_unix_mbox(self):
# TODO: this test could be more thorough.
import email.parser
fname = self.createMessage("cur", True)
n = 0
for msg in mailbox.PortableUnixMailbox(open(fname),
email.parser.Parser().parse):
n += 1
self.assertEqual(msg["subject"], "Simple Test")
self.assertEqual(len(str(msg)), len(FROM_)+len(DUMMY_MESSAGE))
self.assertEqual(n, 1)
## End: tests from the original module (for backward compatibility).
_sample_message = """\
Return-Path: <gkj@gregorykjohnson.com>
X-Original-To: gkj+person@localhost
Delivered-To: gkj+person@localhost
Received: from localhost (localhost [127.0.0.1])
by andy.gregorykjohnson.com (Postfix) with ESMTP id 356ED9DD17
for <gkj+person@localhost>; Wed, 13 Jul 2005 17:23:16 -0400 (EDT)
Delivered-To: gkj@sundance.gregorykjohnson.com
Received: from localhost [127.0.0.1]
by localhost with POP3 (fetchmail-6.2.5)
for gkj+person@localhost (single-drop); Wed, 13 Jul 2005 17:23:16 -0400 (EDT)
Received: from andy.gregorykjohnson.com (andy.gregorykjohnson.com [64.32.235.228])
by sundance.gregorykjohnson.com (Postfix) with ESMTP id 5B056316746
for <gkj@gregorykjohnson.com>; Wed, 13 Jul 2005 17:23:11 -0400 (EDT)
Received: by andy.gregorykjohnson.com (Postfix, from userid 1000)
id 490CD9DD17; Wed, 13 Jul 2005 17:23:11 -0400 (EDT)
Date: Wed, 13 Jul 2005 17:23:11 -0400
From: "Gregory K. Johnson" <gkj@gregorykjohnson.com>
To: gkj@gregorykjohnson.com
Subject: Sample message
Message-ID: <20050713212311.GC4701@andy.gregorykjohnson.com>
Mime-Version: 1.0
Content-Type: multipart/mixed; boundary="NMuMz9nt05w80d4+"
Content-Disposition: inline
User-Agent: Mutt/1.5.9i
--NMuMz9nt05w80d4+
Content-Type: text/plain; charset=us-ascii
Content-Disposition: inline
This is a sample message.
--
Gregory K. Johnson
--NMuMz9nt05w80d4+
Content-Type: application/octet-stream
Content-Disposition: attachment; filename="text.gz"
Content-Transfer-Encoding: base64
H4sICM2D1UIAA3RleHQAC8nILFYAokSFktSKEoW0zJxUPa7wzJIMhZLyfIWczLzUYj0uAHTs
3FYlAAAA
--NMuMz9nt05w80d4+--
"""
# NOTE: a dict literal cannot hold duplicate keys, so the repeated
# "Delivered-To" and "Received" entries below collapse; only the last
# value of each survives.
_sample_headers = {
"Return-Path":"<gkj@gregorykjohnson.com>",
"X-Original-To":"gkj+person@localhost",
"Delivered-To":"gkj+person@localhost",
"Received":"""from localhost (localhost [127.0.0.1])
by andy.gregorykjohnson.com (Postfix) with ESMTP id 356ED9DD17
for <gkj+person@localhost>; Wed, 13 Jul 2005 17:23:16 -0400 (EDT)""",
"Delivered-To":"gkj@sundance.gregorykjohnson.com",
"Received":"""from localhost [127.0.0.1]
by localhost with POP3 (fetchmail-6.2.5)
for gkj+person@localhost (single-drop); Wed, 13 Jul 2005 17:23:16 -0400 (EDT)""",
"Received":"""from andy.gregorykjohnson.com (andy.gregorykjohnson.com [64.32.235.228])
by sundance.gregorykjohnson.com (Postfix) with ESMTP id 5B056316746
for <gkj@gregorykjohnson.com>; Wed, 13 Jul 2005 17:23:11 -0400 (EDT)""",
"Received":"""by andy.gregorykjohnson.com (Postfix, from userid 1000)
id 490CD9DD17; Wed, 13 Jul 2005 17:23:11 -0400 (EDT)""",
"Date":"Wed, 13 Jul 2005 17:23:11 -0400",
"From":""""Gregory K. Johnson" <gkj@gregorykjohnson.com>""",
"To":"gkj@gregorykjohnson.com",
"Subject":"Sample message",
"Mime-Version":"1.0",
"Content-Type":"""multipart/mixed; boundary="NMuMz9nt05w80d4+\"""",
"Content-Disposition":"inline",
"User-Agent": "Mutt/1.5.9i" }
_sample_payloads = ("""This is a sample message.
--
Gregory K. Johnson
""",
"""H4sICM2D1UIAA3RleHQAC8nILFYAokSFktSKEoW0zJxUPa7wzJIMhZLyfIWczLzUYj0uAHTs
3FYlAAAA
""")
def test_main():
tests = (TestMailboxSuperclass, TestMaildir, TestMbox, TestMMDF, TestMH,
TestBabyl, TestMessage, TestMaildirMessage, TestMboxMessage,
TestMHMessage, TestBabylMessage, TestMMDFMessage,
TestMessageConversion, TestProxyFile, TestPartialFile,
MaildirTestCase)
test_support.run_unittest(*tests)
test_support.reap_children()
if __name__ == '__main__':
test_main()
|
RabidCicada/gedit-sessionsaver
|
refs/heads/master
|
sessionsaver/dialogs.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright (c) 2007 - Steve Frécinaux <code@istique.net>
# Copyright (c) 2010 - Kenny Meyer <knny.myer@gmail.com>
# Licence: GPL2 or later
from gi.repository import GObject, Gtk, Gedit
import os.path
import gettext
from .store import Session
try:
from .gpdefs import *
gettext.bindtextdomain(GETTEXT_PACKAGE, GP_LOCALEDIR)
_ = lambda s: gettext.dgettext(GETTEXT_PACKAGE, s)
except Exception:
_ = lambda s: s
class SessionModel(Gtk.ListStore):
OBJECT_COLUMN = 0
NAME_COLUMN = 1
N_COLUMNS = 2
def __init__(self, store):
super(SessionModel, self).__init__(GObject.TYPE_PYOBJECT, str)
self.store = store
for session in store:
# Append with an explicit column order; dict.values() ordering is
# not guaranteed on older Python versions, so it is unsafe here.
self.append([session, session.name])
self.store.connect_after('session-added', self.on_session_added)
self.store.connect('session-removed', self.on_session_removed)
def on_session_added(self, store, session):
# Append with an explicit column order, as in __init__.
self.append([session, session.name])
def on_session_removed(self, store, session):
it = self.get_iter_first()
if it is not None:
while True:
stored_session = self.get_value(it, self.OBJECT_COLUMN)
if stored_session == session:
self.remove(it)
break
it = self.iter_next(it)
if not it:
break
class Dialog(object):
UI_FILE = "sessionsaver.ui"
def __new__(cls, *args):
if not ('_instance' in cls.__dict__) or cls._instance is None:
cls._instance = object.__new__(cls)
return cls._instance
def __init__(self, main_widget, datadir, parent_window = None):
super(Dialog, self).__init__()
if parent_window is None:
parent_window = Gedit.App.get_default().get_active_window()
self.parent = parent_window
self.ui = Gtk.Builder()
self.ui.set_translation_domain(GETTEXT_PACKAGE)
self.ui.add_from_file(os.path.join(datadir, self.UI_FILE))
self.dialog = self.ui.get_object(main_widget)
self.dialog.connect('delete-event', self.on_delete_event)
def __getitem__(self, item):
return self.ui.get_object(item)
def on_delete_event(self, dialog, event):
dialog.hide()
return True
def __del__(self):
self.__class__._instance = None
def run(self):
self.dialog.set_transient_for(self.parent)
self.dialog.show()
def destroy(self):
self.dialog.destroy()
self.__del__()
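# Informal sketch of the singleton behaviour implemented by __new__ above
# (usage illustrative): constructing the same Dialog subclass twice returns
# one shared instance until destroy() resets it. Note that __init__ still
# runs on every construction.
#
#   d1 = SessionManagerDialog(plugin, sessions)
#   d2 = SessionManagerDialog(plugin, sessions)
#   assert d1 is d2       # same cached instance
#   d1.destroy()          # clears cls._instance; the next call builds anew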
class SaveSessionDialog(Dialog):
def __new__(cls, window, plugin, sessions):
return super().__new__(cls)
def __init__(self, window, plugin, sessions):
super().__init__('save-session-dialog',
plugin.plugin_info.get_data_dir(),
window)
self.plugin = plugin
self.sessions = sessions
self.sessionsaver = plugin
model = SessionModel(sessions)
combobox = self['session-name']
combobox.set_model(model)
combobox.set_entry_text_column(1)
self.dialog.connect('response', self.on_response)
def on_response(self, dialog, response_id):
if response_id == Gtk.ResponseType.OK:
files = [doc.get_location()
for doc in self.parent.get_documents()
if doc.get_location() is not None]
name = self['session-name'].get_child().get_text()
self.sessions.add(Session(name, files))
self.sessions.save()
self.sessionsaver.sessions = self.sessions
self.sessionsaver._update_session_menu()
self.destroy()
class SessionManagerDialog(Dialog):
def __new__(cls, plugin, sessions):
return super().__new__(cls)
def __init__(self, plugin, sessions):
super(SessionManagerDialog, self).__init__('session-manager-dialog',
plugin.plugin_info.get_data_dir())
self.plugin = plugin
self.sessions = sessions
model = SessionModel(sessions)
self.view = self['session-view']
self.view.set_model(model)
renderer = Gtk.CellRendererText()
column = Gtk.TreeViewColumn(_("Session Name"), renderer, text = model.NAME_COLUMN)
self.view.append_column(column)
handlers = {
'on_close_button_clicked': self.on_close_button_clicked,
'on_open_button_clicked': self.on_open_button_clicked,
'on_delete_button_clicked': self.on_delete_button_clicked
}
self.ui.connect_signals(handlers)
def on_delete_event(self, dialog, event):
dialog.hide()
self.sessions.save()
return True
def get_current_session(self):
(model, selected) = self.view.get_selection().get_selected()
if selected is None:
return None
return model.get_value(selected, SessionModel.OBJECT_COLUMN)
def on_open_button_clicked(self, button):
session = self.get_current_session()
if session is not None:
self.plugin._load_session(session)
def on_delete_button_clicked(self, button):
session = self.get_current_session()
self.sessions.remove(session)
self.plugin._update_session_menu()
def on_close_button_clicked(self, button):
self.sessions.save()
self.destroy()
# ex:ts=4:et:
|
alwayskidd/LRB
|
refs/heads/master
|
.waf-1.7.13-5a064c2686fe54de4e11018d22148cfc/waflib/Task.py
|
148
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,shutil,re,tempfile
from waflib import Utils,Logs,Errors
NOT_RUN=0
MISSING=1
CRASHED=2
EXCEPTION=3
SKIPPED=8
SUCCESS=9
ASK_LATER=-1
SKIP_ME=-2
RUN_ME=-3
COMPILE_TEMPLATE_SHELL='''
def f(tsk):
env = tsk.env
gen = tsk.generator
bld = gen.bld
wd = getattr(tsk, 'cwd', None)
p = env.get_flat
tsk.last_cmd = cmd = \'\'\' %s \'\'\' % s
return tsk.exec_command(cmd, cwd=wd, env=env.env or None)
'''
COMPILE_TEMPLATE_NOSHELL='''
def f(tsk):
env = tsk.env
gen = tsk.generator
bld = gen.bld
wd = getattr(tsk, 'cwd', None)
def to_list(xx):
if isinstance(xx, str): return [xx]
return xx
tsk.last_cmd = lst = []
%s
lst = [x for x in lst if x]
return tsk.exec_command(lst, cwd=wd, env=env.env or None)
'''
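# Rough sketch of how the templates above are used (see compile_fun below):
# a rule string such as '${CC} ${CFLAGS} ${SRC} -o ${TGT}' is scanned for
# ${...} substitutions; SRC and TGT expand to node paths, other names are
# read from the task's env, and the referenced variable names (here CC and
# CFLAGS) come back as dvars so they can be hashed into the task signature.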
def cache_outputs(cls):
m1=cls.run
def run(self):
bld=self.generator.bld
if bld.cache_global and not bld.nocache:
if self.can_retrieve_cache():
return 0
return m1(self)
cls.run=run
m2=cls.post_run
def post_run(self):
bld=self.generator.bld
ret=m2(self)
if bld.cache_global and not bld.nocache:
self.put_files_cache()
return ret
cls.post_run=post_run
return cls
classes={}
class store_task_type(type):
def __init__(cls,name,bases,dict):
super(store_task_type,cls).__init__(name,bases,dict)
name=cls.__name__
if name.endswith('_task'):
name=name.replace('_task','')
if name!='evil'and name!='TaskBase':
global classes
if getattr(cls,'run_str',None):
(f,dvars)=compile_fun(cls.run_str,cls.shell)
cls.hcode=cls.run_str
cls.run_str=None
cls.run=f
cls.vars=list(set(cls.vars+dvars))
cls.vars.sort()
elif getattr(cls,'run',None)and not'hcode'in cls.__dict__:
cls.hcode=Utils.h_fun(cls.run)
if not getattr(cls,'nocache',None):
cls=cache_outputs(cls)
getattr(cls,'register',classes)[name]=cls
evil=store_task_type('evil',(object,),{})
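# Sketch of what the metaclass above does: defining e.g. `class cxx(Task)`
# with a run_str attribute compiles that template into a run() method,
# records the env variables it references in cls.vars, and registers the
# class in `classes` under its name (any '_task' suffix stripped).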
class TaskBase(evil):
color='GREEN'
ext_in=[]
ext_out=[]
before=[]
after=[]
hcode=''
def __init__(self,*k,**kw):
self.hasrun=NOT_RUN
try:
self.generator=kw['generator']
except KeyError:
self.generator=self
def __repr__(self):
return'\n\t{task %r: %s %s}'%(self.__class__.__name__,id(self),str(getattr(self,'fun','')))
def __str__(self):
if hasattr(self,'fun'):
return'executing: %s\n'%self.fun.__name__
return self.__class__.__name__+'\n'
def __hash__(self):
return id(self)
def exec_command(self,cmd,**kw):
bld=self.generator.bld
try:
if not kw.get('cwd',None):
kw['cwd']=bld.cwd
except AttributeError:
bld.cwd=kw['cwd']=bld.variant_dir
return bld.exec_command(cmd,**kw)
def runnable_status(self):
return RUN_ME
def process(self):
m=self.master
if m.stop:
m.out.put(self)
return
try:
del self.generator.bld.task_sigs[self.uid()]
except KeyError:
pass
try:
self.generator.bld.returned_tasks.append(self)
self.log_display(self.generator.bld)
ret=self.run()
except Exception:
self.err_msg=Utils.ex_stack()
self.hasrun=EXCEPTION
m.error_handler(self)
m.out.put(self)
return
if ret:
self.err_code=ret
self.hasrun=CRASHED
else:
try:
self.post_run()
except Errors.WafError:
pass
except Exception:
self.err_msg=Utils.ex_stack()
self.hasrun=EXCEPTION
else:
self.hasrun=SUCCESS
if self.hasrun!=SUCCESS:
m.error_handler(self)
m.out.put(self)
def run(self):
if hasattr(self,'fun'):
return self.fun(self)
return 0
def post_run(self):
pass
def log_display(self,bld):
bld.to_log(self.display())
def display(self):
col1=Logs.colors(self.color)
col2=Logs.colors.NORMAL
master=self.master
def cur():
tmp=-1
if hasattr(master,'ready'):
tmp-=master.ready.qsize()
return master.processed+tmp
if self.generator.bld.progress_bar==1:
return self.generator.bld.progress_line(cur(),master.total,col1,col2)
if self.generator.bld.progress_bar==2:
ela=str(self.generator.bld.timer)
try:
ins=','.join([n.name for n in self.inputs])
except AttributeError:
ins=''
try:
outs=','.join([n.name for n in self.outputs])
except AttributeError:
outs=''
return'|Total %s|Current %s|Inputs %s|Outputs %s|Time %s|\n'%(master.total,cur(),ins,outs,ela)
s=str(self)
if not s:
return None
total=master.total
n=len(str(total))
fs='[%%%dd/%%%dd] %%s%%s%%s'%(n,n)
return fs%(cur(),total,col1,s,col2)
def attr(self,att,default=None):
ret=getattr(self,att,self)
if ret is self:return getattr(self.__class__,att,default)
return ret
def hash_constraints(self):
cls=self.__class__
tup=(str(cls.before),str(cls.after),str(cls.ext_in),str(cls.ext_out),cls.__name__,cls.hcode)
h=hash(tup)
return h
def format_error(self):
msg=getattr(self,'last_cmd','')
name=getattr(self.generator,'name','')
if getattr(self,"err_msg",None):
return self.err_msg
elif not self.hasrun:
return'task in %r was not executed for some reason: %r'%(name,self)
elif self.hasrun==CRASHED:
try:
return' -> task in %r failed (exit status %r): %r\n%r'%(name,self.err_code,self,msg)
except AttributeError:
return' -> task in %r failed: %r\n%r'%(name,self,msg)
elif self.hasrun==MISSING:
return' -> missing files in %r: %r\n%r'%(name,self,msg)
else:
return'invalid status for task in %r: %r'%(name,self.hasrun)
def colon(self,var1,var2):
tmp=self.env[var1]
if isinstance(var2,str):
it=self.env[var2]
else:
it=var2
if isinstance(tmp,str):
return[tmp%x for x in it]
else:
if Logs.verbose and not tmp and it:
Logs.warn('Missing env variable %r for task %r (generator %r)'%(var1,self,self.generator))
lst=[]
for y in it:
lst.extend(tmp)
lst.append(y)
return lst
class Task(TaskBase):
vars=[]
shell=False
def __init__(self,*k,**kw):
TaskBase.__init__(self,*k,**kw)
self.env=kw['env']
self.inputs=[]
self.outputs=[]
self.dep_nodes=[]
self.run_after=set([])
def __str__(self):
env=self.env
src_str=' '.join([a.nice_path()for a in self.inputs])
tgt_str=' '.join([a.nice_path()for a in self.outputs])
if self.outputs:sep=' -> '
else:sep=''
return'%s: %s%s%s\n'%(self.__class__.__name__.replace('_task',''),src_str,sep,tgt_str)
def __repr__(self):
try:
ins=",".join([x.name for x in self.inputs])
outs=",".join([x.name for x in self.outputs])
except AttributeError:
ins=",".join([str(x)for x in self.inputs])
outs=",".join([str(x)for x in self.outputs])
return"".join(['\n\t{task %r: '%id(self),self.__class__.__name__," ",ins," -> ",outs,'}'])
def uid(self):
try:
return self.uid_
except AttributeError:
m=Utils.md5()
up=m.update
up(self.__class__.__name__)
for x in self.inputs+self.outputs:
up(x.abspath())
self.uid_=m.digest()
return self.uid_
def set_inputs(self,inp):
if isinstance(inp,list):self.inputs+=inp
else:self.inputs.append(inp)
def set_outputs(self,out):
if isinstance(out,list):self.outputs+=out
else:self.outputs.append(out)
def set_run_after(self,task):
assert isinstance(task,TaskBase)
self.run_after.add(task)
def signature(self):
try:return self.cache_sig
except AttributeError:pass
self.m=Utils.md5()
self.m.update(self.hcode)
self.sig_explicit_deps()
self.sig_vars()
if self.scan:
try:
self.sig_implicit_deps()
except Errors.TaskRescan:
return self.signature()
ret=self.cache_sig=self.m.digest()
return ret
def runnable_status(self):
for t in self.run_after:
if not t.hasrun:
return ASK_LATER
bld=self.generator.bld
try:
new_sig=self.signature()
except Errors.TaskNotReady:
return ASK_LATER
key=self.uid()
try:
prev_sig=bld.task_sigs[key]
except KeyError:
Logs.debug("task: task %r must run as it was never run before or the task code changed"%self)
return RUN_ME
for node in self.outputs:
try:
if node.sig!=new_sig:
return RUN_ME
except AttributeError:
Logs.debug("task: task %r must run as the output nodes do not exist"%self)
return RUN_ME
if new_sig!=prev_sig:
return RUN_ME
return SKIP_ME
def post_run(self):
bld=self.generator.bld
sig=self.signature()
for node in self.outputs:
try:
os.stat(node.abspath())
except OSError:
self.hasrun=MISSING
self.err_msg='-> missing file: %r'%node.abspath()
raise Errors.WafError(self.err_msg)
node.sig=sig
bld.task_sigs[self.uid()]=self.cache_sig
def sig_explicit_deps(self):
bld=self.generator.bld
upd=self.m.update
for x in self.inputs+self.dep_nodes:
try:
upd(x.get_bld_sig())
except(AttributeError,TypeError):
raise Errors.WafError('Missing node signature for %r (required by %r)'%(x,self))
if bld.deps_man:
additional_deps=bld.deps_man
for x in self.inputs+self.outputs:
try:
d=additional_deps[id(x)]
except KeyError:
continue
for v in d:
if isinstance(v,bld.root.__class__):
try:
v=v.get_bld_sig()
except AttributeError:
raise Errors.WafError('Missing node signature for %r (required by %r)'%(v,self))
elif hasattr(v,'__call__'):
v=v()
upd(v)
return self.m.digest()
def sig_vars(self):
bld=self.generator.bld
env=self.env
upd=self.m.update
act_sig=bld.hash_env_vars(env,self.__class__.vars)
upd(act_sig)
dep_vars=getattr(self,'dep_vars',None)
if dep_vars:
upd(bld.hash_env_vars(env,dep_vars))
return self.m.digest()
scan=None
def sig_implicit_deps(self):
bld=self.generator.bld
key=self.uid()
prev=bld.task_sigs.get((key,'imp'),[])
if prev:
try:
if prev==self.compute_sig_implicit_deps():
return prev
except Exception:
for x in bld.node_deps.get(self.uid(),[]):
if x.is_child_of(bld.srcnode):
try:
os.stat(x.abspath())
except OSError:
try:
del x.parent.children[x.name]
except KeyError:
pass
del bld.task_sigs[(key,'imp')]
raise Errors.TaskRescan('rescan')
(nodes,names)=self.scan()
if Logs.verbose:
Logs.debug('deps: scanner for %s returned %s %s'%(str(self),str(nodes),str(names)))
bld.node_deps[key]=nodes
bld.raw_deps[key]=names
self.are_implicit_nodes_ready()
try:
bld.task_sigs[(key,'imp')]=sig=self.compute_sig_implicit_deps()
except Exception:
if Logs.verbose:
for k in bld.node_deps.get(self.uid(),[]):
try:
k.get_bld_sig()
except Exception:
Logs.warn('Missing signature for node %r (may cause rebuilds)'%k)
else:
return sig
def compute_sig_implicit_deps(self):
upd=self.m.update
bld=self.generator.bld
self.are_implicit_nodes_ready()
for k in bld.node_deps.get(self.uid(),[]):
upd(k.get_bld_sig())
return self.m.digest()
def are_implicit_nodes_ready(self):
bld=self.generator.bld
try:
cache=bld.dct_implicit_nodes
except AttributeError:
bld.dct_implicit_nodes=cache={}
try:
dct=cache[bld.cur]
except KeyError:
dct=cache[bld.cur]={}
for tsk in bld.cur_tasks:
for x in tsk.outputs:
dct[x]=tsk
modified=False
for x in bld.node_deps.get(self.uid(),[]):
if x in dct:
self.run_after.add(dct[x])
modified=True
if modified:
for tsk in self.run_after:
if not tsk.hasrun:
raise Errors.TaskNotReady('not ready')
def can_retrieve_cache(self):
if not getattr(self,'outputs',None):
return None
sig=self.signature()
ssig=Utils.to_hex(self.uid())+Utils.to_hex(sig)
dname=os.path.join(self.generator.bld.cache_global,ssig)
try:
t1=os.stat(dname).st_mtime
except OSError:
return None
for node in self.outputs:
orig=os.path.join(dname,node.name)
try:
shutil.copy2(orig,node.abspath())
os.utime(orig,None)
except(OSError,IOError):
Logs.debug('task: failed retrieving file')
return None
try:
t2=os.stat(dname).st_mtime
except OSError:
return None
if t1!=t2:
return None
for node in self.outputs:
node.sig=sig
if self.generator.bld.progress_bar<1:
self.generator.bld.to_log('restoring from cache %r\n'%node.abspath())
self.cached=True
return True
def put_files_cache(self):
if getattr(self,'cached',None):
return None
if not getattr(self,'outputs',None):
return None
sig=self.signature()
ssig=Utils.to_hex(self.uid())+Utils.to_hex(sig)
dname=os.path.join(self.generator.bld.cache_global,ssig)
tmpdir=tempfile.mkdtemp(prefix=self.generator.bld.cache_global+os.sep+'waf')
try:
shutil.rmtree(dname)
except Exception:
pass
try:
for node in self.outputs:
dest=os.path.join(tmpdir,node.name)
shutil.copy2(node.abspath(),dest)
except(OSError,IOError):
try:
shutil.rmtree(tmpdir)
except Exception:
pass
else:
try:
os.rename(tmpdir,dname)
except OSError:
try:
shutil.rmtree(tmpdir)
except Exception:
pass
else:
try:
os.chmod(dname,Utils.O755)
except Exception:
pass
def is_before(t1,t2):
to_list=Utils.to_list
for k in to_list(t2.ext_in):
if k in to_list(t1.ext_out):
return 1
if t1.__class__.__name__ in to_list(t2.after):
return 1
if t2.__class__.__name__ in to_list(t1.before):
return 1
return 0
def set_file_constraints(tasks):
ins=Utils.defaultdict(set)
outs=Utils.defaultdict(set)
for x in tasks:
for a in getattr(x,'inputs',[])+getattr(x,'dep_nodes',[]):
ins[id(a)].add(x)
for a in getattr(x,'outputs',[]):
outs[id(a)].add(x)
links=set(ins.keys()).intersection(outs.keys())
for k in links:
for a in ins[k]:
a.run_after.update(outs[k])
def set_precedence_constraints(tasks):
cstr_groups=Utils.defaultdict(list)
for x in tasks:
h=x.hash_constraints()
cstr_groups[h].append(x)
keys=list(cstr_groups.keys())
maxi=len(keys)
for i in range(maxi):
t1=cstr_groups[keys[i]][0]
for j in range(i+1,maxi):
t2=cstr_groups[keys[j]][0]
if is_before(t1,t2):
a=i
b=j
elif is_before(t2,t1):
a=j
b=i
else:
continue
aval=set(cstr_groups[keys[a]])
for x in cstr_groups[keys[b]]:
x.run_after.update(aval)
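# Sketch: tasks are bucketed by identical ordering constraints; whenever one
# bucket's representative must precede another's (per is_before), every task
# in the later bucket gains run_after edges to every task in the earlier one.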
def funex(c):
dc={}
exec(c,dc)
return dc['f']
reg_act=re.compile(r"(?P<backslash>\\)|(?P<dollar>\$\$)|(?P<subst>\$\{(?P<var>\w+)(?P<code>.*?)\})",re.M)
def compile_fun_shell(line):
extr=[]
def repl(match):
g=match.group
if g('dollar'):return"$"
elif g('backslash'):return'\\\\'
elif g('subst'):extr.append((g('var'),g('code')));return"%s"
return None
line=reg_act.sub(repl,line)or line
parm=[]
dvars=[]
app=parm.append
for(var,meth)in extr:
if var=='SRC':
if meth:app('tsk.inputs%s'%meth)
else:app('" ".join([a.path_from(bld.bldnode) for a in tsk.inputs])')
elif var=='TGT':
if meth:app('tsk.outputs%s'%meth)
else:app('" ".join([a.path_from(bld.bldnode) for a in tsk.outputs])')
elif meth:
if meth.startswith(':'):
m=meth[1:]
if m=='SRC':
m='[a.path_from(bld.bldnode) for a in tsk.inputs]'
elif m=='TGT':
m='[a.path_from(bld.bldnode) for a in tsk.outputs]'
elif m[:3]not in('tsk','gen','bld'):
dvars.extend([var,meth[1:]])
m='%r'%m
app('" ".join(tsk.colon(%r, %s))'%(var,m))
else:
app('%s%s'%(var,meth))
else:
if not var in dvars:dvars.append(var)
app("p('%s')"%var)
if parm:parm="%% (%s) "%(',\n\t\t'.join(parm))
else:parm=''
c=COMPILE_TEMPLATE_SHELL%(line,parm)
Logs.debug('action: %s'%c.strip().splitlines())
return(funex(c),dvars)
def compile_fun_noshell(line):
extr=[]
def repl(match):
g=match.group
if g('dollar'):return"$"
elif g('subst'):extr.append((g('var'),g('code')));return"<<|@|>>"
return None
line2=reg_act.sub(repl,line)
params=line2.split('<<|@|>>')
assert(extr)
buf=[]
dvars=[]
app=buf.append
for x in range(len(extr)):
params[x]=params[x].strip()
if params[x]:
app("lst.extend(%r)"%params[x].split())
(var,meth)=extr[x]
if var=='SRC':
if meth:app('lst.append(tsk.inputs%s)'%meth)
else:app("lst.extend([a.path_from(bld.bldnode) for a in tsk.inputs])")
elif var=='TGT':
if meth:app('lst.append(tsk.outputs%s)'%meth)
else:app("lst.extend([a.path_from(bld.bldnode) for a in tsk.outputs])")
elif meth:
if meth.startswith(':'):
m=meth[1:]
if m=='SRC':
m='[a.path_from(bld.bldnode) for a in tsk.inputs]'
elif m=='TGT':
m='[a.path_from(bld.bldnode) for a in tsk.outputs]'
elif m[:3]not in('tsk','gen','bld'):
dvars.extend([var,m])
m='%r'%m
app('lst.extend(tsk.colon(%r, %s))'%(var,m))
else:
app('lst.extend(gen.to_list(%s%s))'%(var,meth))
else:
app('lst.extend(to_list(env[%r]))'%var)
if not var in dvars:dvars.append(var)
if extr:
if params[-1]:
app("lst.extend(%r)"%params[-1].split())
fun=COMPILE_TEMPLATE_NOSHELL%"\n\t".join(buf)
Logs.debug('action: %s'%fun.strip().splitlines())
return(funex(fun),dvars)
def compile_fun(line,shell=False):
if line.find('<')>0 or line.find('>')>0 or line.find('&&')>0:
shell=True
if shell:
return compile_fun_shell(line)
else:
return compile_fun_noshell(line)
def task_factory(name,func=None,vars=None,color='GREEN',ext_in=[],ext_out=[],before=[],after=[],shell=False,scan=None):
params={'vars':vars or[],'color':color,'name':name,'ext_in':Utils.to_list(ext_in),'ext_out':Utils.to_list(ext_out),'before':Utils.to_list(before),'after':Utils.to_list(after),'shell':shell,'scan':scan,}
if isinstance(func,str):
params['run_str']=func
else:
params['run']=func
cls=type(Task)(name,(Task,),params)
global classes
classes[name]=cls
return cls
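# Illustrative use of task_factory (names hypothetical): build a task class
# from a one-line command template, with ${SRC} and ${TGT} expanded to node
# paths at run time:
#   copy_cls = task_factory('copy', 'cp ${SRC} ${TGT}', color='BLUE')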
def always_run(cls):
old=cls.runnable_status
def always(self):
ret=old(self)
if ret==SKIP_ME:
ret=RUN_ME
return ret
cls.runnable_status=always
return cls
def update_outputs(cls):
old_post_run=cls.post_run
def post_run(self):
old_post_run(self)
for node in self.outputs:
node.sig=Utils.h_file(node.abspath())
self.generator.bld.task_sigs[node.abspath()]=self.uid()
cls.post_run=post_run
old_runnable_status=cls.runnable_status
def runnable_status(self):
status=old_runnable_status(self)
if status!=RUN_ME:
return status
try:
bld=self.generator.bld
prev_sig=bld.task_sigs[self.uid()]
if prev_sig==self.signature():
for x in self.outputs:
if not x.sig or bld.task_sigs[x.abspath()]!=self.uid():
return RUN_ME
return SKIP_ME
except KeyError:
pass
except IndexError:
pass
except AttributeError:
pass
return RUN_ME
cls.runnable_status=runnable_status
return cls
|
biocyberman/bcbio-nextgen
|
refs/heads/master
|
bcbio/rnaseq/salmon.py
|
1
|
"""
Wrapper for Salmon:
https://github.com/COMBINE-lab/salmon
http://biorxiv.org/content/early/2015/06/27/021592
"""
import os
from bcbio.rnaseq import sailfish
import bcbio.pipeline.datadict as dd
from bcbio.utils import (file_exists, safe_makedir, is_gzipped)
import bcbio.utils as utils
from bcbio.distributed.transaction import file_transaction
from bcbio.provenance import do
from bcbio.pipeline import config_utils
from bcbio import bam
from bcbio.log import logger
def run_salmon_bam(data):
samplename = dd.get_sample_name(data)
work_dir = dd.get_work_dir(data)
salmon_dir = os.path.join(work_dir, "salmon", samplename)
gtf_file = dd.get_gtf_file(data)
bam_file = dd.get_transcriptome_bam(data)
assert file_exists(gtf_file), "%s was not found, exiting." % gtf_file
fasta_file = dd.get_ref_file(data)
assert file_exists(fasta_file), "%s was not found, exiting." % fasta_file
out_file = salmon_quant_bam(bam_file, salmon_dir, gtf_file, fasta_file, data)
data = dd.set_salmon(data, out_file)
data = dd.set_salmon_dir(data, salmon_dir)
return [[data]]
def run_salmon_reads(data):
data = utils.to_single_data(data)
samplename = dd.get_sample_name(data)
work_dir = dd.get_work_dir(data)
salmon_dir = os.path.join(work_dir, "salmon", samplename)
gtf_file = dd.get_gtf_file(data)
files = dd.get_input_sequence_files(data)
if len(files) == 2:
fq1, fq2 = files
else:
fq1, fq2 = files[0], None
assert file_exists(gtf_file), "%s was not found, exiting." % gtf_file
fasta_file = dd.get_ref_file(data)
assert file_exists(fasta_file), "%s was not found, exiting." % fasta_file
out_file = salmon_quant_reads(fq1, fq2, salmon_dir, gtf_file, fasta_file, data)
data = dd.set_salmon(data, out_file)
data = dd.set_salmon_dir(data, salmon_dir)
return [[data]]
def salmon_quant_reads(fq1, fq2, salmon_dir, gtf_file, ref_file, data):
samplename = dd.get_sample_name(data)
quant_dir = os.path.join(salmon_dir, "quant")
safe_makedir(salmon_dir)
out_file = os.path.join(quant_dir, "quant.sf")
if file_exists(out_file):
return out_file
num_cores = dd.get_num_cores(data)
strandedness = dd.get_strandedness(data).lower()
salmon = config_utils.get_program("salmon", dd.get_config(data))
libtype = sailfish._libtype_string(fq1, fq2, strandedness)
index = salmon_index(gtf_file, ref_file, data, os.path.dirname(salmon_dir))
resources = config_utils.get_resources("salmon", dd.get_config(data))
params = ""
if resources.get("options") is not None:
params = " ".join([str(x) for x in resources.get("options", [])])
cmd = ("{salmon} quant {libtype} -i {index} -p {num_cores} "
"--gcBias "
"-o {tx_out_dir} {params} ")
fq1_cmd = "<(cat {fq1})" if not is_gzipped(fq1) else "<(gzip -cd {fq1})"
fq1_cmd = fq1_cmd.format(fq1=fq1)
if not fq2:
cmd += " -r {fq1_cmd} "
else:
fq2_cmd = "<(cat {fq2})" if not is_gzipped(fq2) else "<(gzip -cd {fq2})"
fq2_cmd = fq2_cmd.format(fq2=fq2)
cmd += " -1 {fq1_cmd} -2 {fq2_cmd} "
# skip --useVBOpt for now, it can cause segfaults
cmd += "--numBootstraps 30 "
with file_transaction(data, quant_dir) as tx_out_dir:
message = ("Quantifying transcripts in %s and %s with Salmon."
%(fq1, fq2))
do.run(cmd.format(**locals()), message, None)
sailfish.sleuthify_sailfish(tx_out_dir)
return out_file
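# For orientation, the command assembled above comes out roughly as follows
# for gzipped paired-end input (placeholders illustrative, not from a real
# run):
#   salmon quant <libtype> -i <index> -p <cores> --gcBias -o <tx_out_dir> \
#       -1 <(gzip -cd <fq1>) -2 <(gzip -cd <fq2>) --numBootstraps 30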
def salmon_quant_bam(bam_file, salmon_dir, gtf_file, ref_file, data):
samplename = dd.get_sample_name(data)
quant_dir = os.path.join(salmon_dir, "quant")
safe_makedir(salmon_dir)
out_file = os.path.join(quant_dir, "quant.sf")
if file_exists(out_file):
return out_file
gtf_fa = sailfish.create_combined_fasta(data, salmon_dir)
num_cores = dd.get_num_cores(data)
strandedness = dd.get_strandedness(data).lower()
salmon = config_utils.get_program("salmon", dd.get_config(data))
libtype = _libtype_string(bam_file, strandedness)
cmd = ("{salmon} quant {libtype} -p {num_cores} -t {gtf_fa} "
"-o {tx_out_dir} -a {bam_file} ")
cmd += "--numBootstraps 30 "
with file_transaction(data, quant_dir) as tx_out_dir:
message = "Quantifying transcripts in %s with Salmon." % bam_file
do.run(cmd.format(**locals()), message, None)
return out_file
def _libtype_string(bam_file, strandedness):
libtype = "-l I" if bam.is_paired(bam_file) else "-l "
strand = sailfish._sailfish_strand_string(strandedness)
return libtype + strand
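# For example, assuming sailfish._sailfish_strand_string maps 'unstranded'
# to 'U' and 'firststrand' to 'SR', a paired-end first-strand BAM yields
# "-l ISR" and a single-end unstranded one yields "-l U".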
def run_salmon_index(*samples):
for data in dd.sample_data_iterator(samples):
work_dir = dd.get_work_dir(data)
salmon_dir = os.path.join(work_dir, "salmon")
gtf_file = dd.get_gtf_file(data)
assert file_exists(gtf_file), "%s was not found, exiting." % gtf_file
fasta_file = dd.get_ref_file(data)
assert file_exists(fasta_file), "%s was not found, exiting." % fasta_file
salmon_index(gtf_file, fasta_file, data, salmon_dir)
return samples
def salmon_index(gtf_file, ref_file, data, out_dir):
out_dir = os.path.join(out_dir, "index", sailfish.get_build_string(data))
if dd.get_disambiguate(data):
out_dir = "-".join([out_dir] + dd.get_disambiguate(data))
salmon = config_utils.get_program("salmon", dd.get_config(data))
num_cores = dd.get_num_cores(data)
if dd.get_transcriptome_fasta(data):
gtf_fa = dd.get_transcriptome_fasta(data)
else:
gtf_fa = sailfish.create_combined_fasta(data)
assert file_exists(gtf_fa), "%s was not found, exiting." % gtf_fa
tmpdir = dd.get_tmp_dir(data)
out_file = os.path.join(out_dir, "versionInfo.json")
if file_exists(out_file):
logger.info("Transcriptome index for %s detected, skipping building." % gtf_fa)
return out_dir
files = dd.get_input_sequence_files(data)
kmersize = sailfish.pick_kmersize(files[0])
with file_transaction(data, out_dir) as tx_out_dir:
cmd = "{salmon} index -k {kmersize} -p {num_cores} -i {tx_out_dir} -t {gtf_fa}"
message = "Creating Salmon index for {gtf_fa} with {kmersize} bp kmers."
do.run(cmd.format(**locals()), message.format(**locals()), None)
return out_dir
|
bobrik/node-gyp
|
refs/heads/master
|
legacy/tools/gyp/buildbot/buildbot_run.py
|
42
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Argument-less script to select what to run on the buildbots."""
import os
import shutil
import subprocess
import sys
if sys.platform in ['win32', 'cygwin']:
EXE_SUFFIX = '.exe'
else:
EXE_SUFFIX = ''
BUILDBOT_DIR = os.path.dirname(os.path.abspath(__file__))
TRUNK_DIR = os.path.dirname(BUILDBOT_DIR)
ROOT_DIR = os.path.dirname(TRUNK_DIR)
OUT_DIR = os.path.join(TRUNK_DIR, 'out')
def GypTestFormat(title, format=None, msvs_version=None):
"""Run the gyp tests for a given format, emitting annotator tags.
See annotator docs at:
https://sites.google.com/a/chromium.org/dev/developers/testing/chromium-build-infrastructure/buildbot-annotations
Args:
title: build step title; also used as the format if format is None.
format: gyp format to test.
msvs_version: if set, exported to the test run as GYP_MSVS_VERSION.
Returns:
0 for success, 1 for failure.
"""
if not format:
format = title
print '@@@BUILD_STEP ' + title + '@@@'
sys.stdout.flush()
env = os.environ.copy()
# TODO(bradnelson): remove this when this issue is resolved:
# http://code.google.com/p/chromium/issues/detail?id=108251
if format == 'ninja':
env['NOGOLD'] = '1'
if msvs_version:
env['GYP_MSVS_VERSION'] = msvs_version
retcode = subprocess.call(' '.join(
[sys.executable, 'trunk/gyptest.py',
'--all',
'--passed',
'--format', format,
'--chdir', 'trunk',
'--path', '../scons']),
cwd=ROOT_DIR, env=env, shell=True)
if retcode:
# Emit failure tag, and keep going.
print '@@@STEP_FAILURE@@@'
return 1
return 0
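# For reference, on a Linux builder testing the ninja format the call above
# expands to roughly:
#   python trunk/gyptest.py --all --passed --format ninja --chdir trunk --path ../scons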
def GypBuild():
# Dump out/ directory.
print '@@@BUILD_STEP cleanup@@@'
print 'Removing %s...' % OUT_DIR
shutil.rmtree(OUT_DIR, ignore_errors=True)
print 'Done.'
retcode = 0
if sys.platform.startswith('linux'):
retcode += GypTestFormat('ninja')
retcode += GypTestFormat('scons')
retcode += GypTestFormat('make')
elif sys.platform == 'darwin':
retcode += GypTestFormat('ninja')
retcode += GypTestFormat('xcode')
retcode += GypTestFormat('make')
elif sys.platform == 'win32':
retcode += GypTestFormat('msvs-2008', format='msvs', msvs_version='2008')
if os.environ.get('BUILDBOT_BUILDERNAME') == 'gyp-win64':
retcode += GypTestFormat('msvs-2010', format='msvs', msvs_version='2010')
else:
raise Exception('Unknown platform')
if retcode:
# TODO(bradnelson): once the annotator supports a postscript (section for
# after the build proper that could be used for cumulative failures),
# use that instead of this. This isolates the final return value so
# that it isn't misattributed to the last stage.
print '@@@BUILD_STEP failures@@@'
sys.exit(retcode)
if __name__ == '__main__':
GypBuild()
|
allotria/intellij-community
|
refs/heads/master
|
python/testData/refactoring/move/referenceToClassWithNewInMovedSymbol/after/src/classFile.py
|
62
|
from collections import namedtuple
class Pipeline(namedtuple('_Pipeline', 'name')):
def __new__(cls, name):
return super(Pipeline, cls).__new__(cls, name)
def __init__(self, name):
pass
|
zzh442856860/personal-file-sharing-center
|
refs/heads/master
|
web/form.py
|
53
|
"""
HTML forms
(part of web.py)
"""
import copy, re
import webapi as web
import utils, net
def attrget(obj, attr, value=None):
try:
if hasattr(obj, 'has_key') and obj.has_key(attr):
return obj[attr]
except TypeError:
# Handle the case where has_key takes different number of arguments.
# This is the case with Model objects on appengine. See #134
pass
if hasattr(obj, attr):
return getattr(obj, attr)
return value
class Form(object):
r"""
HTML form.
>>> f = Form(Textbox("x"))
>>> f.render()
u'<table>\n <tr><th><label for="x">x</label></th><td><input type="text" id="x" name="x"/></td></tr>\n</table>'
"""
def __init__(self, *inputs, **kw):
self.inputs = inputs
self.valid = True
self.note = None
self.validators = kw.pop('validators', [])
def __call__(self, x=None):
o = copy.deepcopy(self)
if x: o.validates(x)
return o
def render(self):
out = ''
out += self.rendernote(self.note)
out += '<table>\n'
for i in self.inputs:
html = utils.safeunicode(i.pre) + i.render() + self.rendernote(i.note) + utils.safeunicode(i.post)
if i.is_hidden():
out += ' <tr style="display: none;"><th></th><td>%s</td></tr>\n' % (html)
else:
out += ' <tr><th><label for="%s">%s</label></th><td>%s</td></tr>\n' % (i.id, net.websafe(i.description), html)
out += "</table>"
return out
def render_css(self):
out = []
out.append(self.rendernote(self.note))
for i in self.inputs:
if not i.is_hidden():
out.append('<label for="%s">%s</label>' % (i.id, net.websafe(i.description)))
out.append(i.pre)
out.append(i.render())
out.append(self.rendernote(i.note))
out.append(i.post)
out.append('\n')
return ''.join(out)
def rendernote(self, note):
if note: return '<strong class="wrong">%s</strong>' % net.websafe(note)
else: return ""
def validates(self, source=None, _validate=True, **kw):
source = source or kw or web.input()
out = True
for i in self.inputs:
v = attrget(source, i.name)
if _validate:
out = i.validate(v) and out
else:
i.set_value(v)
if _validate:
out = out and self._validate(source)
self.valid = out
return out
def _validate(self, value):
self.value = value
for v in self.validators:
if not v.valid(value):
self.note = v.msg
return False
return True
def fill(self, source=None, **kw):
return self.validates(source, _validate=False, **kw)
def __getitem__(self, i):
for x in self.inputs:
if x.name == i: return x
raise KeyError, i
def __getattr__(self, name):
# don't interfere with deepcopy
inputs = self.__dict__.get('inputs') or []
for x in inputs:
if x.name == name: return x
raise AttributeError, name
def get(self, i, default=None):
try:
return self[i]
except KeyError:
return default
def _get_d(self): #@@ should really be form.attr, no?
return utils.storage([(i.name, i.get_value()) for i in self.inputs])
d = property(_get_d)
class Input(object):
def __init__(self, name, *validators, **attrs):
self.name = name
self.validators = validators
self.attrs = attrs = AttributeList(attrs)
self.description = attrs.pop('description', name)
self.value = attrs.pop('value', None)
self.pre = attrs.pop('pre', "")
self.post = attrs.pop('post', "")
self.note = None
self.id = attrs.setdefault('id', self.get_default_id())
if 'class_' in attrs:
attrs['class'] = attrs['class_']
del attrs['class_']
def is_hidden(self):
return False
def get_type(self):
raise NotImplementedError
def get_default_id(self):
return self.name
def validate(self, value):
self.set_value(value)
for v in self.validators:
if not v.valid(value):
self.note = v.msg
return False
return True
def set_value(self, value):
self.value = value
def get_value(self):
return self.value
def render(self):
attrs = self.attrs.copy()
attrs['type'] = self.get_type()
if self.value is not None:
attrs['value'] = self.value
attrs['name'] = self.name
return '<input %s/>' % attrs
def rendernote(self, note):
if note: return '<strong class="wrong">%s</strong>' % net.websafe(note)
else: return ""
def addatts(self):
# add leading space for backward-compatibility
return " " + str(self.attrs)
class AttributeList(dict):
"""List of atributes of input.
>>> a = AttributeList(type='text', name='x', value=20)
>>> a
<attrs: 'type="text" name="x" value="20"'>
"""
def copy(self):
return AttributeList(self)
def __str__(self):
return " ".join(['%s="%s"' % (k, net.websafe(v)) for k, v in self.items()])
def __repr__(self):
return '<attrs: %s>' % repr(str(self))
class Textbox(Input):
"""Textbox input.
>>> Textbox(name='foo', value='bar').render()
u'<input type="text" id="foo" value="bar" name="foo"/>'
>>> Textbox(name='foo', value=0).render()
u'<input type="text" id="foo" value="0" name="foo"/>'
"""
def get_type(self):
return 'text'
class Password(Input):
"""Password input.
>>> Password(name='password', value='secret').render()
u'<input type="password" id="password" value="secret" name="password"/>'
"""
def get_type(self):
return 'password'
class Textarea(Input):
"""Textarea input.
>>> Textarea(name='foo', value='bar').render()
u'<textarea id="foo" name="foo">bar</textarea>'
"""
def render(self):
attrs = self.attrs.copy()
attrs['name'] = self.name
value = net.websafe(self.value or '')
return '<textarea %s>%s</textarea>' % (attrs, value)
class Dropdown(Input):
r"""Dropdown/select input.
>>> Dropdown(name='foo', args=['a', 'b', 'c'], value='b').render()
u'<select id="foo" name="foo">\n <option value="a">a</option>\n <option selected="selected" value="b">b</option>\n <option value="c">c</option>\n</select>\n'
>>> Dropdown(name='foo', args=[('a', 'aa'), ('b', 'bb'), ('c', 'cc')], value='b').render()
u'<select id="foo" name="foo">\n <option value="a">aa</option>\n <option selected="selected" value="b">bb</option>\n <option value="c">cc</option>\n</select>\n'
"""
def __init__(self, name, args, *validators, **attrs):
self.args = args
super(Dropdown, self).__init__(name, *validators, **attrs)
def render(self):
attrs = self.attrs.copy()
attrs['name'] = self.name
x = '<select %s>\n' % attrs
for arg in self.args:
x += self._render_option(arg)
x += '</select>\n'
return x
def _render_option(self, arg, indent=' '):
if isinstance(arg, (tuple, list)):
value, desc= arg
else:
value, desc = arg, arg
if self.value == value or (isinstance(self.value, list) and value in self.value):
select_p = ' selected="selected"'
else:
select_p = ''
return indent + '<option%s value="%s">%s</option>\n' % (select_p, net.websafe(value), net.websafe(desc))
class GroupedDropdown(Dropdown):
r"""Grouped Dropdown/select input.
>>> GroupedDropdown(name='car_type', args=(('Swedish Cars', ('Volvo', 'Saab')), ('German Cars', ('Mercedes', 'Audi'))), value='Audi').render()
u'<select id="car_type" name="car_type">\n <optgroup label="Swedish Cars">\n <option value="Volvo">Volvo</option>\n <option value="Saab">Saab</option>\n </optgroup>\n <optgroup label="German Cars">\n <option value="Mercedes">Mercedes</option>\n <option selected="selected" value="Audi">Audi</option>\n </optgroup>\n</select>\n'
>>> GroupedDropdown(name='car_type', args=(('Swedish Cars', (('v', 'Volvo'), ('s', 'Saab'))), ('German Cars', (('m', 'Mercedes'), ('a', 'Audi')))), value='a').render()
u'<select id="car_type" name="car_type">\n <optgroup label="Swedish Cars">\n <option value="v">Volvo</option>\n <option value="s">Saab</option>\n </optgroup>\n <optgroup label="German Cars">\n <option value="m">Mercedes</option>\n <option selected="selected" value="a">Audi</option>\n </optgroup>\n</select>\n'
"""
def __init__(self, name, args, *validators, **attrs):
self.args = args
super(Dropdown, self).__init__(name, *validators, **attrs)
def render(self):
attrs = self.attrs.copy()
attrs['name'] = self.name
x = '<select %s>\n' % attrs
for label, options in self.args:
x += ' <optgroup label="%s">\n' % net.websafe(label)
for arg in options:
x += self._render_option(arg, indent = ' ')
x += ' </optgroup>\n'
x += '</select>\n'
return x
class Radio(Input):
def __init__(self, name, args, *validators, **attrs):
self.args = args
super(Radio, self).__init__(name, *validators, **attrs)
def render(self):
x = '<span>'
for arg in self.args:
if isinstance(arg, (tuple, list)):
value, desc= arg
else:
value, desc = arg, arg
attrs = self.attrs.copy()
attrs['name'] = self.name
attrs['type'] = 'radio'
attrs['value'] = value
if self.value == value:
attrs['checked'] = 'checked'
x += '<input %s/> %s' % (attrs, net.websafe(desc))
x += '</span>'
return x
class Checkbox(Input):
"""Checkbox input.
>>> Checkbox('foo', value='bar', checked=True).render()
u'<input checked="checked" type="checkbox" id="foo_bar" value="bar" name="foo"/>'
>>> Checkbox('foo', value='bar').render()
u'<input type="checkbox" id="foo_bar" value="bar" name="foo"/>'
>>> c = Checkbox('foo', value='bar')
>>> c.validate('on')
True
>>> c.render()
u'<input checked="checked" type="checkbox" id="foo_bar" value="bar" name="foo"/>'
"""
def __init__(self, name, *validators, **attrs):
self.checked = attrs.pop('checked', False)
Input.__init__(self, name, *validators, **attrs)
def get_default_id(self):
value = utils.safestr(self.value or "")
return self.name + '_' + value.replace(' ', '_')
def render(self):
attrs = self.attrs.copy()
attrs['type'] = 'checkbox'
attrs['name'] = self.name
attrs['value'] = self.value
if self.checked:
attrs['checked'] = 'checked'
return '<input %s/>' % attrs
def set_value(self, value):
self.checked = bool(value)
def get_value(self):
return self.checked
class Button(Input):
"""HTML Button.
>>> Button("save").render()
u'<button id="save" name="save">save</button>'
>>> Button("action", value="save", html="<b>Save Changes</b>").render()
u'<button id="action" value="save" name="action"><b>Save Changes</b></button>'
"""
def __init__(self, name, *validators, **attrs):
super(Button, self).__init__(name, *validators, **attrs)
self.description = ""
def render(self):
attrs = self.attrs.copy()
attrs['name'] = self.name
if self.value is not None:
attrs['value'] = self.value
html = attrs.pop('html', None) or net.websafe(self.name)
return '<button %s>%s</button>' % (attrs, html)
class Hidden(Input):
"""Hidden Input.
>>> Hidden(name='foo', value='bar').render()
u'<input type="hidden" id="foo" value="bar" name="foo"/>'
"""
def is_hidden(self):
return True
def get_type(self):
return 'hidden'
class File(Input):
"""File input.
>>> File(name='f').render()
u'<input type="file" id="f" name="f"/>'
"""
def get_type(self):
return 'file'
class Validator:
def __deepcopy__(self, memo): return copy.copy(self)
def __init__(self, msg, test, jstest=None): utils.autoassign(self, locals())
def valid(self, value):
try: return self.test(value)
except: return False
notnull = Validator("Required", bool)
class regexp(Validator):
def __init__(self, rexp, msg):
self.rexp = re.compile(rexp)
self.msg = msg
def valid(self, value):
return bool(self.rexp.match(value))
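# Informal sketch of validator usage (pattern illustrative):
#   phone = regexp(r'\d{3}-\d{4}', 'must look like 555-1234')
#   phone.valid('555-1234')   # -> True
#   notnull.valid('')         # -> False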
if __name__ == "__main__":
import doctest
doctest.testmod()
|
YOTOV-LIMITED/kitsune
|
refs/heads/master
|
manage.py
|
10
|
#!/usr/bin/env python
import os
import sys
# Now we can import from third-party libraries.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kitsune.settings_local')
os.environ.setdefault('CELERY_CONFIG_MODULE', 'kitsune.settings_local')
# MONKEYPATCH! WOO HOO!
# Need this so we patch before running Django-specific commands which
# import Jingo and then result in a circular import.
from kitsune.sumo.monkeypatch import patch # noqa
patch()
# Import for side-effect: configures our logging handlers.
from kitsune import log_settings # noqa
if __name__ == "__main__":
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
tuomas2/automate
|
refs/heads/master
|
examples/filechange_directory.py
|
1
|
"""
Watch file changes in this directory
"""
from automate import *
class mysys(System):
fs = FileChangeSensor(filename="./", silent=True)
p = Program(
triggers=[fs],
update_condition=Value(True),
on_update=Run(Log(ToStr(fs)))
)
s = mysys()
|
RedHatInsights/insights-core
|
refs/heads/master
|
insights/parsr/query/tests/test_compile_queries.py
|
1
|
from insights.parsr.query import all_, any_, compile_queries, Entry, lt
complex_tree = Entry(name="root",
attrs=[1, 2, 3, 4],
children=[
Entry(name="child", attrs=[1, 1, 2]),
Entry(name="child", attrs=[1, 1, 2, 3, 5]),
Entry(name="child", attrs=[1, 1, 3, 5, 9]),
Entry(name="dog", attrs=["woof"], children=[
Entry(name="puppy", attrs=["smol"]),
Entry(name="puppy", attrs=["fluffy"]),
Entry(name="kitten", attrs=["wut"]),
])
])
def test_complex():
t = complex_tree
q = compile_queries("child")
assert len(q(t.children)) == 3
q = compile_queries(("child", 3))
assert len(q(t.children)) == 2
q = compile_queries(("child", all_(lt(3))))
assert len(q(t.children)) == 1
q = compile_queries(("child", any_(1)))
assert len(q(t.children)) == 3
q = compile_queries("dog", "puppy")
assert len(q(t.children)) == 2
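# A hedged extra check in the same pattern: chained name arguments descend
# one level per argument, so ("dog", "kitten") should match the single
# kitten entry under the dog node.
def test_nested_kitten():
    q = compile_queries("dog", "kitten")
    assert len(q(complex_tree.children)) == 1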
|
radlab/sparrow
|
refs/heads/master
|
deploy/third_party/boto-2.1.1/boto/sqs/__init__.py
|
15
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from regioninfo import SQSRegionInfo
def regions():
"""
Get all available regions for the SQS service.
:rtype: list
    :return: A list of :class:`boto.sqs.regioninfo.SQSRegionInfo`
"""
return [SQSRegionInfo(name='us-east-1',
endpoint='queue.amazonaws.com'),
SQSRegionInfo(name='eu-west-1',
endpoint='eu-west-1.queue.amazonaws.com'),
SQSRegionInfo(name='us-west-1',
endpoint='us-west-1.queue.amazonaws.com'),
SQSRegionInfo(name='ap-northeast-1',
endpoint='ap-northeast-1.queue.amazonaws.com'),
SQSRegionInfo(name='ap-southeast-1',
endpoint='ap-southeast-1.queue.amazonaws.com')
]
def connect_to_region(region_name, **kw_params):
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
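# Usage sketch (hedged): region.connect() returns an SQSConnection bound to
# that region's endpoint; the credentials below are placeholders.
#
#     conn = connect_to_region('eu-west-1',
#                              aws_access_key_id='<access key>',
#                              aws_secret_access_key='<secret key>')
#     if conn is not None:
#         queue = conn.create_queue('my-queue')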
|
Enchufa2/ns-3-dev-git
|
refs/heads/comcom-paper
|
doc/models/source/conf.py
|
90
|
# -*- coding: utf-8 -*-
#
# ns-3 documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 14 09:00:39 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ns-3'
copyright = u'2011, ns-3 project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'ns-3-dev'
# The full version, including alpha/beta/rc tags.
release = 'ns-3-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'ns3_html_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../..']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Model Library'
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'Models'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y %H:%M'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ns-3doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ns-3-model-library.tex', u'ns-3 Model Library',
u'ns-3 project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '../../ns3_html_theme/static/ns-3.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ns-3-model-library', u'ns-3 Model Library',
[u'ns-3 project'], 1)
]
|
mlouhivu/build-recipes
|
refs/heads/master
|
gpaw/examples/taito-1.1.0/customize-taito.py
|
2
|
# User provided customizations for the gpaw setup
import os
# compiler
compiler = 'mpicc'
mpicompiler = 'mpicc'
mpilinker = 'mpicc'
extra_compile_args = ['-std=c99', '-O3']
# libz
libraries = ['z']
# libxc
library_dirs += [os.environ['LIBXCDIR'] + '/lib']
include_dirs += [os.environ['LIBXCDIR'] + '/include']
libraries += ['xc']
# MKL
libraries += ['mkl_intel_lp64', 'mkl_sequential', 'mkl_core']
mpi_libraries += ['mkl_scalapack_lp64', 'mkl_blacs_intelmpi_lp64']
# use ScaLAPACK and HDF5
scalapack = True
hdf5 = True
libraries += ['hdf5']
library_dirs += [os.environ['H5ROOT'] + '/lib']
include_dirs += [os.environ['H5ROOT'] + '/include']
# GPAW defines
define_macros += [('GPAW_NO_UNDERSCORE_CBLACS', '1')]
define_macros += [('GPAW_NO_UNDERSCORE_CSCALAPACK', '1')]
define_macros += [("GPAW_ASYNC",1)]
define_macros += [("GPAW_MPI2",1)]
|
pcm17/tensorflow
|
refs/heads/master
|
tensorflow/python/training/proximal_adagrad_test.py
|
22
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Proximal Adagrad operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import proximal_adagrad
class ProximalAdagradOptimizerTest(test.TestCase):
def doTestProximalAdagradwithoutRegularization(self, use_resource=False):
with self.test_session() as sess:
      if use_resource:
        var0 = resource_variable_ops.ResourceVariable([0.0, 0.0])
        var1 = resource_variable_ops.ResourceVariable([0.0, 0.0])
      else:
        var0 = variables.Variable([0.0, 0.0])
        var1 = variables.Variable([0.0, 0.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_adagrad.ProximalAdagradOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose([0.0, 0.0], v0_val)
self.assertAllClose([0.0, 0.0], v1_val)
# Run 3 steps Proximal Adagrad.
for _ in range(3):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose(np.array([-2.60260963, -4.29698515]), v0_val)
self.assertAllClose(np.array([-0.28432083, -0.56694895]), v1_val)
def testProximalAdagradwithoutRegularization(self):
self.doTestProximalAdagradwithoutRegularization(use_resource=False)
def testResourceProximalAdagradwithoutRegularization(self):
self.doTestProximalAdagradwithoutRegularization(use_resource=True)
def testProximalAdagradwithoutRegularization2(self):
with self.test_session() as sess:
var0 = variables.Variable([1.0, 2.0])
var1 = variables.Variable([4.0, 3.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_adagrad.ProximalAdagradOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([4.0, 3.0], v1_val)
# Run 3 steps Proximal Adagrad.
for _ in range(3):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose(np.array([-1.60261, -2.296985]), v0_val)
self.assertAllClose(np.array([3.715679, 2.433051]), v1_val)
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.test_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = proximal_adagrad.ProximalAdagradOptimizer(1.0).minimize(loss)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
        # Run 1 step of the optimizer
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[[0, 1]], var0.eval(), atol=0.01)
def testProximalAdagradWithL1(self):
with self.test_session() as sess:
var0 = variables.Variable([1.0, 2.0])
var1 = variables.Variable([4.0, 3.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_adagrad.ProximalAdagradOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([4.0, 3.0], v1_val)
# Run 10 steps Proximal Adagrad
for _ in range(10):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose(np.array([0.662907, 0.767398]), v0_val)
self.assertAllClose(np.array([2.959304, 1.029232]), v1_val)
def testProximalAdagradWithL1_L2(self):
with self.test_session() as sess:
var0 = variables.Variable([1.0, 2.0])
var1 = variables.Variable([4.0, 3.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_adagrad.ProximalAdagradOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([4.0, 3.0], v1_val)
# Run 10 steps Proximal Adagrad.
for _ in range(10):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose(np.array([0.043069, 0.080461]), v0_val)
self.assertAllClose(np.array([0.004069, 0.008578]), v1_val)
def applyOptimizer(self, opt, steps=5, is_sparse=False):
if is_sparse:
var0 = variables.Variable([[1.0], [2.0]])
var1 = variables.Variable([[3.0], [4.0]])
grads0 = ops.IndexedSlices(
constant_op.constant(
[0.1], shape=[1, 1]),
constant_op.constant([0]),
constant_op.constant([2, 1]))
grads1 = ops.IndexedSlices(
constant_op.constant(
[0.02], shape=[1, 1]),
constant_op.constant([1]),
constant_op.constant([2, 1]))
else:
var0 = variables.Variable([1.0, 2.0])
var1 = variables.Variable([3.0, 4.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
sess = ops.get_default_session()
v0_val, v1_val = sess.run([var0, var1])
if is_sparse:
self.assertAllClose([[1.0], [2.0]], v0_val)
self.assertAllClose([[3.0], [4.0]], v1_val)
else:
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([3.0, 4.0], v1_val)
# Run ProximalAdagrad for a few steps
for _ in range(steps):
update.run()
v0_val, v1_val = sess.run([var0, var1])
return v0_val, v1_val
def testEquivAdagradwithoutRegularization(self):
with self.test_session():
val0, val1 = self.applyOptimizer(
proximal_adagrad.ProximalAdagradOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0))
with self.test_session():
val2, val3 = self.applyOptimizer(
adagrad.AdagradOptimizer(
3.0, initial_accumulator_value=0.1))
self.assertAllClose(val0, val2)
self.assertAllClose(val1, val3)
def testEquivSparseAdagradwithoutRegularization(self):
with self.test_session():
val0, val1 = self.applyOptimizer(
proximal_adagrad.ProximalAdagradOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
is_sparse=True)
with self.test_session():
val2, val3 = self.applyOptimizer(
adagrad.AdagradOptimizer(
3.0, initial_accumulator_value=0.1),
is_sparse=True)
self.assertAllClose(val0, val2)
self.assertAllClose(val1, val3)
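# Minimal usage sketch (hedged; mirrors the constructor arguments exercised
# above, with `loss` standing in for any scalar loss tensor):
#
#   opt = proximal_adagrad.ProximalAdagradOptimizer(
#       learning_rate=0.01,
#       l1_regularization_strength=0.001,
#       l2_regularization_strength=0.001)
#   train_op = opt.minimize(loss)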
if __name__ == "__main__":
test.main()
|
Distrotech/intellij-community
|
refs/heads/master
|
python/testData/intentions/afterTypeAssertion4.py
|
83
|
def foo3(x, y):
assert isinstance(y, object)
i = x + y
return i
|
steev/linux-kernel
|
refs/heads/master
|
scripts/tracing/draw_functrace.py
|
14679
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more
human-readable view of the call stack, drawn as a textual but
hierarchical tree of calls. Only the functions' names and the call
times are provided.
Usage:
	Be sure that you have CONFIG_FUNCTION_TRACER enabled
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
	Wait for a while, but not too long; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
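# A line the regexp above should match looks roughly like this (hedged;
# the exact function-tracer output varies across kernel versions):
#
#   bash-4251  [000]  1013.911917: do_sys_open <-sys_open
#
# yielding calltime='1013.911917', callee='do_sys_open', caller='sys_open'.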
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
newerthcom/savagerebirth
|
refs/heads/master
|
libs/python-2.72/Lib/encodings/iso8859_5.py
|
593
|
""" Python Character Mapping Codec iso8859_5 generated from 'MAPPINGS/ISO8859/8859-5.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-5',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
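# Usage sketch (hedged, Python 2 semantics): once the standard `encodings`
# package can locate this module, the codec is addressable by name.
#
#     u'\u0416'.encode('iso8859-5')   # -> '\xb6' (CYRILLIC CAPITAL LETTER ZHE)
#     '\xb6'.decode('iso8859-5')      # -> u'\u0416'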
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0401' # 0xA1 -> CYRILLIC CAPITAL LETTER IO
u'\u0402' # 0xA2 -> CYRILLIC CAPITAL LETTER DJE
u'\u0403' # 0xA3 -> CYRILLIC CAPITAL LETTER GJE
u'\u0404' # 0xA4 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u0405' # 0xA5 -> CYRILLIC CAPITAL LETTER DZE
u'\u0406' # 0xA6 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0407' # 0xA7 -> CYRILLIC CAPITAL LETTER YI
u'\u0408' # 0xA8 -> CYRILLIC CAPITAL LETTER JE
u'\u0409' # 0xA9 -> CYRILLIC CAPITAL LETTER LJE
u'\u040a' # 0xAA -> CYRILLIC CAPITAL LETTER NJE
u'\u040b' # 0xAB -> CYRILLIC CAPITAL LETTER TSHE
u'\u040c' # 0xAC -> CYRILLIC CAPITAL LETTER KJE
u'\xad' # 0xAD -> SOFT HYPHEN
u'\u040e' # 0xAE -> CYRILLIC CAPITAL LETTER SHORT U
u'\u040f' # 0xAF -> CYRILLIC CAPITAL LETTER DZHE
u'\u0410' # 0xB0 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0xB1 -> CYRILLIC CAPITAL LETTER BE
u'\u0412' # 0xB2 -> CYRILLIC CAPITAL LETTER VE
u'\u0413' # 0xB3 -> CYRILLIC CAPITAL LETTER GHE
u'\u0414' # 0xB4 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0xB5 -> CYRILLIC CAPITAL LETTER IE
u'\u0416' # 0xB6 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0417' # 0xB7 -> CYRILLIC CAPITAL LETTER ZE
u'\u0418' # 0xB8 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0xB9 -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0xBA -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0xBB -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0xBC -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0xBD -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0xBE -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0xBF -> CYRILLIC CAPITAL LETTER PE
u'\u0420' # 0xC0 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0xC1 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0xC2 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0xC3 -> CYRILLIC CAPITAL LETTER U
u'\u0424' # 0xC4 -> CYRILLIC CAPITAL LETTER EF
u'\u0425' # 0xC5 -> CYRILLIC CAPITAL LETTER HA
u'\u0426' # 0xC6 -> CYRILLIC CAPITAL LETTER TSE
u'\u0427' # 0xC7 -> CYRILLIC CAPITAL LETTER CHE
u'\u0428' # 0xC8 -> CYRILLIC CAPITAL LETTER SHA
u'\u0429' # 0xC9 -> CYRILLIC CAPITAL LETTER SHCHA
u'\u042a' # 0xCA -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u042b' # 0xCB -> CYRILLIC CAPITAL LETTER YERU
u'\u042c' # 0xCC -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042d' # 0xCD -> CYRILLIC CAPITAL LETTER E
u'\u042e' # 0xCE -> CYRILLIC CAPITAL LETTER YU
u'\u042f' # 0xCF -> CYRILLIC CAPITAL LETTER YA
u'\u0430' # 0xD0 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xD1 -> CYRILLIC SMALL LETTER BE
u'\u0432' # 0xD2 -> CYRILLIC SMALL LETTER VE
u'\u0433' # 0xD3 -> CYRILLIC SMALL LETTER GHE
u'\u0434' # 0xD4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xD5 -> CYRILLIC SMALL LETTER IE
u'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE
u'\u0437' # 0xD7 -> CYRILLIC SMALL LETTER ZE
u'\u0438' # 0xD8 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xD9 -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xDA -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xDB -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xDC -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xDD -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xDE -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xDF -> CYRILLIC SMALL LETTER PE
u'\u0440' # 0xE0 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xE1 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xE2 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xE3 -> CYRILLIC SMALL LETTER U
u'\u0444' # 0xE4 -> CYRILLIC SMALL LETTER EF
u'\u0445' # 0xE5 -> CYRILLIC SMALL LETTER HA
u'\u0446' # 0xE6 -> CYRILLIC SMALL LETTER TSE
u'\u0447' # 0xE7 -> CYRILLIC SMALL LETTER CHE
u'\u0448' # 0xE8 -> CYRILLIC SMALL LETTER SHA
u'\u0449' # 0xE9 -> CYRILLIC SMALL LETTER SHCHA
u'\u044a' # 0xEA -> CYRILLIC SMALL LETTER HARD SIGN
u'\u044b' # 0xEB -> CYRILLIC SMALL LETTER YERU
u'\u044c' # 0xEC -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044d' # 0xED -> CYRILLIC SMALL LETTER E
u'\u044e' # 0xEE -> CYRILLIC SMALL LETTER YU
u'\u044f' # 0xEF -> CYRILLIC SMALL LETTER YA
u'\u2116' # 0xF0 -> NUMERO SIGN
u'\u0451' # 0xF1 -> CYRILLIC SMALL LETTER IO
u'\u0452' # 0xF2 -> CYRILLIC SMALL LETTER DJE
u'\u0453' # 0xF3 -> CYRILLIC SMALL LETTER GJE
u'\u0454' # 0xF4 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u0455' # 0xF5 -> CYRILLIC SMALL LETTER DZE
u'\u0456' # 0xF6 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0457' # 0xF7 -> CYRILLIC SMALL LETTER YI
u'\u0458' # 0xF8 -> CYRILLIC SMALL LETTER JE
u'\u0459' # 0xF9 -> CYRILLIC SMALL LETTER LJE
u'\u045a' # 0xFA -> CYRILLIC SMALL LETTER NJE
u'\u045b' # 0xFB -> CYRILLIC SMALL LETTER TSHE
u'\u045c' # 0xFC -> CYRILLIC SMALL LETTER KJE
u'\xa7' # 0xFD -> SECTION SIGN
u'\u045e' # 0xFE -> CYRILLIC SMALL LETTER SHORT U
u'\u045f' # 0xFF -> CYRILLIC SMALL LETTER DZHE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|